Dataset schema (column name, dtype, and value stats: distinct-value count for stringclasses, min to max otherwise):

repo_name        stringclasses          6 values
pr_number        int64                  512 to 78.9k
pr_title         stringlengths          3 to 144
pr_description   stringlengths          0 to 30.3k
author           stringlengths          2 to 21
date_created     timestamp[ns, tz=UTC]
date_merged      timestamp[ns, tz=UTC]
previous_commit  stringlengths          40 to 40
pr_commit        stringlengths          40 to 40
query            stringlengths          17 to 30.4k
filepath         stringlengths          9 to 210
before_content   stringlengths          0 to 112M
after_content    stringlengths          0 to 112M
label            int64                  -1 to 1
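Each record below pairs PR-level metadata (repo, number, title, description, author, timestamps, commits, query) with a single file: its path, its content before and after the PR commit, and an int64 label. A minimal sketch of loading and inspecting a dataset with this schema via the Hugging Face datasets library follows; the identifier "org/pr-file-relevance" is a placeholder, not the real dataset path.

from datasets import load_dataset

# "org/pr-file-relevance" is a hypothetical identifier standing in for the
# real dataset path, which is not given in this dump.
ds = load_dataset("org/pr-file-relevance", split="train")

# Each row repeats the PR metadata and adds one file's path, its
# before/after content, and a label in the range -1 to 1 per the schema above.
for row in ds.select(range(3)):
    print(row["repo_name"], row["pr_number"], row["filepath"], row["label"])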
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/coreclr/tools/superpmi/mcs/verbprintjiteeversion.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "standardpch.h" #include "verbprintjiteeversion.h" #include "runtimedetails.h" // Print the GUID in format a5eec3a4-4176-43a7-8c2b-a05b551d4f49 // // This is useful for tools that want to determine which MCH file to use for a // particular JIT: if the JIT and MCS are built from the same source tree, then // use this function to print out the JITEEVersion, and use that to determine // which MCH files to use. // int verbPrintJITEEVersion::DoWork() { const GUID& g = JITEEVersionIdentifier; printf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7]); return 0; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "standardpch.h" #include "verbprintjiteeversion.h" #include "runtimedetails.h" // Print the GUID in format a5eec3a4-4176-43a7-8c2b-a05b551d4f49 // // This is useful for tools that want to determine which MCH file to use for a // particular JIT: if the JIT and MCS are built from the same source tree, then // use this function to print out the JITEEVersion, and use that to determine // which MCH files to use. // int verbPrintJITEEVersion::DoWork() { const GUID& g = JITEEVersionIdentifier; printf("%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n", g.Data1, g.Data2, g.Data3, g.Data4[0], g.Data4[1], g.Data4[2], g.Data4[3], g.Data4[4], g.Data4[5], g.Data4[6], g.Data4[7]); return 0; }
-1
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltScenarios/EXslt/out/string-decode-uri.xml
<out> <test1>http://www.example.com/my résumé.html</test1> <test2>http://www.example.com/my résumé.html</test2> <test3></test3> <test4></test4> <test4></test4> <test5>http://www.example.com/my résumé.html</test5> </out>
<out> <test1>http://www.example.com/my résumé.html</test1> <test2>http://www.example.com/my résumé.html</test2> <test3></test3> <test4></test4> <test4></test4> <test5>http://www.example.com/my résumé.html</test5> </out>
-1
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/libraries/Common/src/Interop/Windows/SspiCli/Interop.LsaConnectUntrusted.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class SspiCli { [GeneratedDllImport(Interop.Libraries.SspiCli)] internal static partial int LsaConnectUntrusted(out SafeLsaHandle LsaHandle); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class SspiCli { [GeneratedDllImport(Interop.Libraries.SspiCli)] internal static partial int LsaConnectUntrusted(out SafeLsaHandle LsaHandle); } }
-1
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/tests/JIT/Regression/JitBlue/DevDiv_280123/DevDiv_280123.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType /> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/libraries/System.Transactions.Local/src/System/Transactions/DependentTransaction.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; namespace System.Transactions { public sealed class DependentTransaction : Transaction { private readonly bool _blocking; // Create a transaction with the given settings // internal DependentTransaction(IsolationLevel isoLevel, InternalTransaction internalTransaction, bool blocking) : base(isoLevel, internalTransaction) { _blocking = blocking; lock (_internalTransaction) { Debug.Assert(_internalTransaction.State != null); if (blocking) { _internalTransaction.State.CreateBlockingClone(_internalTransaction); } else { _internalTransaction.State.CreateAbortingClone(_internalTransaction); } } } public void Complete() { TransactionsEtwProvider etwLog = TransactionsEtwProvider.Log; if (etwLog.IsEnabled()) { etwLog.MethodEnter(TraceSourceType.TraceSourceLtm, this); } lock (_internalTransaction) { if (Disposed) { throw new ObjectDisposedException(nameof(DependentTransaction)); } if (_complete) { throw TransactionException.CreateTransactionCompletedException(DistributedTxId); } _complete = true; Debug.Assert(_internalTransaction.State != null); if (_blocking) { _internalTransaction.State.CompleteBlockingClone(_internalTransaction); } else { _internalTransaction.State.CompleteAbortingClone(_internalTransaction); } } if (etwLog.IsEnabled()) { etwLog.TransactionDependentCloneComplete(this, "DependentTransaction"); etwLog.MethodExit(TraceSourceType.TraceSourceLtm, this); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; namespace System.Transactions { public sealed class DependentTransaction : Transaction { private readonly bool _blocking; // Create a transaction with the given settings // internal DependentTransaction(IsolationLevel isoLevel, InternalTransaction internalTransaction, bool blocking) : base(isoLevel, internalTransaction) { _blocking = blocking; lock (_internalTransaction) { Debug.Assert(_internalTransaction.State != null); if (blocking) { _internalTransaction.State.CreateBlockingClone(_internalTransaction); } else { _internalTransaction.State.CreateAbortingClone(_internalTransaction); } } } public void Complete() { TransactionsEtwProvider etwLog = TransactionsEtwProvider.Log; if (etwLog.IsEnabled()) { etwLog.MethodEnter(TraceSourceType.TraceSourceLtm, this); } lock (_internalTransaction) { if (Disposed) { throw new ObjectDisposedException(nameof(DependentTransaction)); } if (_complete) { throw TransactionException.CreateTransactionCompletedException(DistributedTxId); } _complete = true; Debug.Assert(_internalTransaction.State != null); if (_blocking) { _internalTransaction.State.CompleteBlockingClone(_internalTransaction); } else { _internalTransaction.State.CompleteAbortingClone(_internalTransaction); } } if (etwLog.IsEnabled()) { etwLog.TransactionDependentCloneComplete(this, "DependentTransaction"); etwLog.MethodExit(TraceSourceType.TraceSourceLtm, this); } } } }
-1
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/tests/JIT/Methodical/eh/deadcode/deadEHregionacrossBB.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern mscorlib { } .assembly extern eh_common {} .assembly 'deadEHregionacrossBB' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .assembly extern xunit.core {} .imagebase 0x00400000 .subsystem 0x00000003 .file alignment 512 .corflags 0x00000001 .class private auto ansi beforefieldinit test extends [mscorlib]System.Object { .field private static class [eh_common]TestUtil.TestLog testLog .method private hidebysig specialname rtspecialname static void .cctor() cil managed { .maxstack 2 newobj instance void [eh_common]TestUtil.TestLog::.ctor() stsfld class [eh_common]TestUtil.TestLog test::testLog ret } // end of method test::.cctor .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 1 .locals init (int32 V_0) ldsfld class [eh_common]TestUtil.TestLog test::testLog callvirt instance void [eh_common]TestUtil.TestLog::StartRecording() L: br IL_0019 .try { IL_0000: ldstr "In try" IL_0005: call void [System.Console]System.Console::WriteLine(string) ldloc.0 brfalse L2 leave.s L L2: .try { leave.s L } finally { endfinally } IL_000a: leave.s IL_0019 } // end .try catch [mscorlib]System.Object { IL_000c: pop IL_000d: ldstr "In catch" IL_0012: call void [System.Console]System.Console::WriteLine(string) IL_0017: leave.s IL_0019 } // end handler IL_0019: ldsfld class [eh_common]TestUtil.TestLog test::testLog callvirt instance void [eh_common]TestUtil.TestLog::StopRecording() ldsfld class [eh_common]TestUtil.TestLog test::testLog callvirt instance int32 [eh_common]TestUtil.TestLog::VerifyOutput() IL_0022: ret } // end of method test::Main .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 1 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } // end of method test::.ctor } // end of class test
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern mscorlib { } .assembly extern eh_common {} .assembly 'deadEHregionacrossBB' { .hash algorithm 0x00008004 .ver 0:0:0:0 } .assembly extern xunit.core {} .imagebase 0x00400000 .subsystem 0x00000003 .file alignment 512 .corflags 0x00000001 .class private auto ansi beforefieldinit test extends [mscorlib]System.Object { .field private static class [eh_common]TestUtil.TestLog testLog .method private hidebysig specialname rtspecialname static void .cctor() cil managed { .maxstack 2 newobj instance void [eh_common]TestUtil.TestLog::.ctor() stsfld class [eh_common]TestUtil.TestLog test::testLog ret } // end of method test::.cctor .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 1 .locals init (int32 V_0) ldsfld class [eh_common]TestUtil.TestLog test::testLog callvirt instance void [eh_common]TestUtil.TestLog::StartRecording() L: br IL_0019 .try { IL_0000: ldstr "In try" IL_0005: call void [System.Console]System.Console::WriteLine(string) ldloc.0 brfalse L2 leave.s L L2: .try { leave.s L } finally { endfinally } IL_000a: leave.s IL_0019 } // end .try catch [mscorlib]System.Object { IL_000c: pop IL_000d: ldstr "In catch" IL_0012: call void [System.Console]System.Console::WriteLine(string) IL_0017: leave.s IL_0019 } // end handler IL_0019: ldsfld class [eh_common]TestUtil.TestLog test::testLog callvirt instance void [eh_common]TestUtil.TestLog::StopRecording() ldsfld class [eh_common]TestUtil.TestLog test::testLog callvirt instance int32 [eh_common]TestUtil.TestLog::VerifyOutput() IL_0022: ret } // end of method test::Main .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 1 IL_0000: ldarg.0 IL_0001: call instance void [mscorlib]System.Object::.ctor() IL_0006: ret } // end of method test::.ctor } // end of class test
-1
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/libraries/System.Diagnostics.TextWriterTraceListener/tests/CommonUtilities.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Globalization; using System.IO; using System.Text; namespace System.Diagnostics.TextWriterTraceListenerTests { internal static class CommonUtilities { internal const string DefaultDelimiter = ";"; internal static void DeleteFile(string fileName) { if (File.Exists(fileName)) File.Delete(fileName); } internal static string ExpectedTraceEventOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, string format, object[] args) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, format, args, null, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(EscapedString(args != null ? string.Format(format, args) : format)); builder.Append(DefaultDelimiter); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(DefaultDelimiter); builder.Append(EscapedString(data.ToString())); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(string delimiter, TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object[] data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; string secondDelimiter = delimiter == "," ? 
DefaultDelimiter : ","; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id, delimiter); builder.Append(delimiter); if (data != null) { for (int i = 0; i < data.Length; ++i) { if (i != 0) builder.Append(secondDelimiter); builder.Append(EscapedString(data[i].ToString())); } } builder.Append(delimiter); builder.AppendTraceEventCache(cache, delimiter); return builder.AppendLine().ToString(); } private static void AppendHeader(this StringBuilder builder, string source, TraceEventType eventType, int id, string delimiter = DefaultDelimiter) { builder.Append(EscapedString(source)); builder.Append(delimiter); builder.Append(eventType.ToString()); builder.Append(delimiter); builder.Append(id.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); } private static void AppendTraceEventCache(this StringBuilder builder, TraceEventCache cache, string delimiter = DefaultDelimiter) { if (cache != null) { builder.Append(cache.ProcessId); builder.Append(delimiter); builder.Append(EscapedStack(cache.LogicalOperationStack)); builder.Append(delimiter); builder.Append(EscapedString(cache.ThreadId)); builder.Append(delimiter); builder.Append(EscapedString(cache.DateTime.ToString("o", CultureInfo.InvariantCulture))); builder.Append(delimiter); builder.Append(cache.Timestamp.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); builder.Append(EscapedString(cache.Callstack)); } else { for (int i = 0; i < 5; ++i) builder.Append(delimiter); } } private static string EscapedString(string str) { if (!string.IsNullOrEmpty(str)) { StringBuilder sb = new StringBuilder("\""); EscapeMessage(str, sb); sb.Append("\""); return sb.ToString(); } return string.Empty; } private static string EscapedStack(Stack stack) { StringBuilder sb = new StringBuilder("\""); bool first = true; foreach (object obj in stack) { if (!first) { sb.Append(", "); } else { first = false; } string operation = obj.ToString(); EscapeMessage(operation, sb); } sb.Append("\""); return sb.ToString(); } private static void EscapeMessage(string message, StringBuilder sb) { int index; int lastindex = 0; while ((index = message.IndexOf('"', lastindex)) != -1) { sb.Append(message, lastindex, index - lastindex); sb.Append("\"\""); lastindex = index + 1; } sb.Append(message, lastindex, message.Length - lastindex); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Globalization; using System.IO; using System.Text; namespace System.Diagnostics.TextWriterTraceListenerTests { internal static class CommonUtilities { internal const string DefaultDelimiter = ";"; internal static void DeleteFile(string fileName) { if (File.Exists(fileName)) File.Delete(fileName); } internal static string ExpectedTraceEventOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, string format, object[] args) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, format, args, null, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(EscapedString(args != null ? string.Format(format, args) : format)); builder.Append(DefaultDelimiter); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id); builder.Append(DefaultDelimiter); builder.Append(EscapedString(data.ToString())); builder.Append(DefaultDelimiter); builder.AppendTraceEventCache(cache); return builder.AppendLine().ToString(); } internal static string ExpectedTraceDataOutput(string delimiter, TraceFilter filter, TraceEventCache cache, string source, TraceEventType eventType, int id, object[] data) { if (filter != null && !filter.ShouldTrace(cache, source, eventType, id, null, null, data, null)) return string.Empty; string secondDelimiter = delimiter == "," ? 
DefaultDelimiter : ","; var builder = new StringBuilder(); builder.AppendHeader(source, eventType, id, delimiter); builder.Append(delimiter); if (data != null) { for (int i = 0; i < data.Length; ++i) { if (i != 0) builder.Append(secondDelimiter); builder.Append(EscapedString(data[i].ToString())); } } builder.Append(delimiter); builder.AppendTraceEventCache(cache, delimiter); return builder.AppendLine().ToString(); } private static void AppendHeader(this StringBuilder builder, string source, TraceEventType eventType, int id, string delimiter = DefaultDelimiter) { builder.Append(EscapedString(source)); builder.Append(delimiter); builder.Append(eventType.ToString()); builder.Append(delimiter); builder.Append(id.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); } private static void AppendTraceEventCache(this StringBuilder builder, TraceEventCache cache, string delimiter = DefaultDelimiter) { if (cache != null) { builder.Append(cache.ProcessId); builder.Append(delimiter); builder.Append(EscapedStack(cache.LogicalOperationStack)); builder.Append(delimiter); builder.Append(EscapedString(cache.ThreadId)); builder.Append(delimiter); builder.Append(EscapedString(cache.DateTime.ToString("o", CultureInfo.InvariantCulture))); builder.Append(delimiter); builder.Append(cache.Timestamp.ToString(CultureInfo.InvariantCulture)); builder.Append(delimiter); builder.Append(EscapedString(cache.Callstack)); } else { for (int i = 0; i < 5; ++i) builder.Append(delimiter); } } private static string EscapedString(string str) { if (!string.IsNullOrEmpty(str)) { StringBuilder sb = new StringBuilder("\""); EscapeMessage(str, sb); sb.Append("\""); return sb.ToString(); } return string.Empty; } private static string EscapedStack(Stack stack) { StringBuilder sb = new StringBuilder("\""); bool first = true; foreach (object obj in stack) { if (!first) { sb.Append(", "); } else { first = false; } string operation = obj.ToString(); EscapeMessage(operation, sb); } sb.Append("\""); return sb.ToString(); } private static void EscapeMessage(string message, StringBuilder sb) { int index; int lastindex = 0; while ((index = message.IndexOf('"', lastindex)) != -1) { sb.Append(message, lastindex, index - lastindex); sb.Append("\"\""); lastindex = index + 1; } sb.Append(message, lastindex, message.Length - lastindex); } } }
-1
dotnet/runtime
66,290
Fix workaround for static virtual methods
The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
MichalStrehovsky
2022-03-07T11:38:33Z
2022-03-07T15:58:30Z
29e6159623292fe882a2df7ae4bdfa2bd0d1e2ce
154386d63b3ba55ce3251fa4caf3dfa89279d982
Fix workaround for static virtual methods. The workaround for reflection invoking static virtual methods wasn't working around well enough. Fixes #66028. The repro case also hit a scanning failure due to scanner not scanning a throw helper. Those are more of asserts, so add them to the collection of throw helpers to ignore.
./src/libraries/System.Linq/src/System/Linq/Cast.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; namespace System.Linq { public static partial class Enumerable { public static IEnumerable<TResult> OfType<TResult>(this IEnumerable source) { if (source == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source); } return OfTypeIterator<TResult>(source); } private static IEnumerable<TResult> OfTypeIterator<TResult>(IEnumerable source) { foreach (object? obj in source) { if (obj is TResult result) { yield return result; } } } public static IEnumerable< #nullable disable // there's no way to annotate the connection of the nullability of TResult to that of the source TResult #nullable restore > Cast<TResult>(this IEnumerable source) { if (source is IEnumerable<TResult> typedSource) { return typedSource; } if (source == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source); } return CastIterator<TResult>(source); } private static IEnumerable<TResult> CastIterator<TResult>(IEnumerable source) { foreach (object obj in source) { yield return (TResult)obj; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; namespace System.Linq { public static partial class Enumerable { public static IEnumerable<TResult> OfType<TResult>(this IEnumerable source) { if (source == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source); } return OfTypeIterator<TResult>(source); } private static IEnumerable<TResult> OfTypeIterator<TResult>(IEnumerable source) { foreach (object? obj in source) { if (obj is TResult result) { yield return result; } } } public static IEnumerable< #nullable disable // there's no way to annotate the connection of the nullability of TResult to that of the source TResult #nullable restore > Cast<TResult>(this IEnumerable source) { if (source is IEnumerable<TResult> typedSource) { return typedSource; } if (source == null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.source); } return CastIterator<TResult>(source); } private static IEnumerable<TResult> CastIterator<TResult>(IEnumerable source) { foreach (object obj in source) { yield return (TResult)obj; } } } }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument
- Do not use fast tail call when the callee uses a non-standard calling convention
- Do not use fast tail call when it overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack space that will be passed to the callee.
./src/coreclr/jit/compiler.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Compiler XX XX XX XX Represents the method data we are currently JIT-compiling. XX XX An instance of this class is created for every method we JIT. XX XX This contains all the info needed for the method. So allocating a XX XX a new instance per method makes it thread-safe. XX XX It should be used to do all the memory management for the compiler run. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _COMPILER_H_ #define _COMPILER_H_ /*****************************************************************************/ #include "jit.h" #include "opcode.h" #include "varset.h" #include "jitstd.h" #include "jithashtable.h" #include "gentree.h" #include "debuginfo.h" #include "lir.h" #include "block.h" #include "inline.h" #include "jiteh.h" #include "instr.h" #include "regalloc.h" #include "sm.h" #include "cycletimer.h" #include "blockset.h" #include "arraystack.h" #include "hashbv.h" #include "jitexpandarray.h" #include "tinyarray.h" #include "valuenum.h" #include "jittelemetry.h" #include "namedintrinsiclist.h" #ifdef LATE_DISASM #include "disasm.h" #endif #include "codegeninterface.h" #include "regset.h" #include "jitgcinfo.h" #if DUMP_GC_TABLES && defined(JIT32_GCENCODER) #include "gcdump.h" #endif #include "emit.h" #include "hwintrinsic.h" #include "simd.h" #include "simdashwintrinsic.h" // This is only used locally in the JIT to indicate that // a verification block should be inserted #define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER /***************************************************************************** * Forward declarations */ struct InfoHdr; // defined in GCInfo.h struct escapeMapping_t; // defined in fgdiagnostic.cpp class emitter; // defined in emit.h struct ShadowParamVarInfo; // defined in GSChecks.cpp struct InitVarDscInfo; // defined in register_arg_convention.h class FgStack; // defined in fgbasic.cpp class Instrumentor; // defined in fgprofile.cpp class SpanningTreeVisitor; // defined in fgprofile.cpp class CSE_DataFlow; // defined in OptCSE.cpp class OptBoolsDsc; // defined in optimizer.cpp #ifdef DEBUG struct IndentStack; #endif class Lowering; // defined in lower.h // The following are defined in this file, Compiler.h class Compiler; /***************************************************************************** * Unwind info */ #include "unwind.h" /*****************************************************************************/ // // Declare global operator new overloads that use the compiler's arena allocator // // I wanted to make the second argument optional, with default = CMK_Unknown, but that // caused these to be ambiguous with the global placement new operators. void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk); void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference); // Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions. 
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
GenTreeOp* m_asg; public: LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr) { } LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); } BasicBlock* GetBlock() const { return m_block; } void SetBlock(BasicBlock* block) { m_block = block; } GenTreeOp* GetAssignment() const { return m_asg; } void SetAssignment(GenTreeOp* asg) { assert((asg == nullptr) || asg->OperIs(GT_ASG)); m_asg = asg; } ValueNumPair m_vnPair; }; // This class stores information associated with a memory SSA definition. class SsaMemDef { public: ValueNumPair m_vnPair; }; //------------------------------------------------------------------------ // SsaDefArray: A resizable array of SSA definitions. // // Unlike an ordinary resizable array implementation, this allows only element // addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM // (basically it's a 1-based array). The array doesn't impose any particular // requirements on the elements it stores and AllocSsaNum forwards its arguments // to the array element constructor, this way the array supports both LclSsaVarDsc // and SsaMemDef elements. // template <typename T> class SsaDefArray { T* m_array; unsigned m_arraySize; unsigned m_count; static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1); // Get the minimum valid SSA number. unsigned GetMinSsaNum() const { return SsaConfig::FIRST_SSA_NUM; } // Increase (double) the size of the array. void GrowArray(CompAllocator alloc) { unsigned oldSize = m_arraySize; unsigned newSize = max(2, oldSize * 2); T* newArray = alloc.allocate<T>(newSize); for (unsigned i = 0; i < oldSize; i++) { newArray[i] = m_array[i]; } m_array = newArray; m_arraySize = newSize; } public: // Construct an empty SsaDefArray. SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0) { } // Reset the array (used only if the SSA form is reconstructed). void Reset() { m_count = 0; } // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM). template <class... Args> unsigned AllocSsaNum(CompAllocator alloc, Args&&... args) { if (m_count == m_arraySize) { GrowArray(alloc); } unsigned ssaNum = GetMinSsaNum() + m_count; m_array[m_count++] = T(std::forward<Args>(args)...); // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1)); return ssaNum; } // Get the number of SSA definitions in the array. unsigned GetCount() const { return m_count; } // Get a pointer to the SSA definition at the specified index. T* GetSsaDefByIndex(unsigned index) { assert(index < m_count); return &m_array[index]; } // Check if the specified SSA number is valid. bool IsValidSsaNum(unsigned ssaNum) const { return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count)); } // Get a pointer to the SSA definition associated with the specified SSA number. T* GetSsaDef(unsigned ssaNum) { assert(ssaNum != SsaConfig::RESERVED_SSA_NUM); return GetSsaDefByIndex(ssaNum - GetMinSsaNum()); } // Get an SSA number associated with the specified SSA def (that must be in this array). 
unsigned GetSsaNum(T* ssaDef) { assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count])); return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]); } }; enum RefCountState { RCS_INVALID, // not valid to get/set ref counts RCS_EARLY, // early counts for struct promotion and struct passing RCS_NORMAL, // normal ref counts (from lvaMarkRefs onward) }; #ifdef DEBUG // Reasons why we can't enregister a local. enum class DoNotEnregisterReason { None, AddrExposed, // the address of this local is exposed. DontEnregStructs, // struct enregistration is disabled. NotRegSizeStruct, // the struct size does not much any register size, usually the struct size is too big. LocalField, // the local is accessed with LCL_FLD, note we can do it not only for struct locals. VMNeedsStackAddr, LiveInOutOfHandler, // the local is alive in and out of exception handler and not signle def. BlockOp, // Is read or written via a block operation. IsStructArg, // Is a struct passed as an argument in a way that requires a stack location. DepField, // It is a field of a dependently promoted struct NoRegVars, // opts.compFlags & CLFLG_REGVAR is not set MinOptsGC, // It is a GC Ref and we are compiling MinOpts #if !defined(TARGET_64BIT) LongParamField, // It is a decomposed field of a long parameter. #endif #ifdef JIT32_GCENCODER PinningRef, #endif LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD. CastTakesAddr, StoreBlkSrc, // the local is used as STORE_BLK source. OneAsgRetyping, // fgMorphOneAsgBlockOp prevents this local from being enregister. SwizzleArg, // the local is passed using LCL_FLD as another type. BlockOpRet, // the struct is returned and it promoted or there is a cast. ReturnSpCheck, // the local is used to do SP check SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted }; enum class AddressExposedReason { NONE, PARENT_EXPOSED, // This is a promoted field but the parent is exposed. TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places. ESCAPE_ADDRESS, // The address is escaping, for example, passed as call argument. WIDE_INDIR, // We access via indirection with wider type. OSR_EXPOSED, // It was exposed in the original method, osr has to repeat it. STRESS_LCL_FLD, // Stress mode replaces localVar with localFld and makes them addrExposed. COPY_FLD_BY_FLD, // Field by field copy takes the address of the local, can be fixed. DISPATCH_RET_BUF // Caller return buffer dispatch. }; #endif // DEBUG class LclVarDsc { public: // The constructor. Most things can just be zero'ed. // // Initialize the ArgRegs to REG_STK. // Morph will update if this local is passed in a register. LclVarDsc() : _lvArgReg(REG_STK) , #if FEATURE_MULTIREG_ARGS _lvOtherArgReg(REG_STK) , #endif // FEATURE_MULTIREG_ARGS lvClassHnd(NO_CLASS_HANDLE) , lvRefBlks(BlockSetOps::UninitVal()) , lvPerSsaData() { } // note this only packs because var_types is a typedef of unsigned char var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF unsigned char lvIsParam : 1; // is this a parameter? unsigned char lvIsRegArg : 1; // is this an argument that was passed by register? unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP) unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the // variable is in the same register for the entire function. 
unsigned char lvTracked : 1; // is this a tracked variable? bool lvTrackedNonStruct() { return lvTracked && lvType != TYP_STRUCT; } unsigned char lvPinned : 1; // is this a pinned variable? unsigned char lvMustInit : 1; // must be initialized private: bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a // global location, etc. // We cannot reason reliably about the value of the variable. public: unsigned char lvDoNotEnregister : 1; // Do not enregister this variable. unsigned char lvFieldAccessed : 1; // The var is a struct local, and a field of the variable is accessed. Affects // struct promotion. unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must // be on the stack (at least at those boundaries.) unsigned char lvInSsa : 1; // The variable is in SSA form (set by SsaBuilder) unsigned char lvIsCSE : 1; // Indicates if this LclVar is a CSE variable. unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local. unsigned char lvStackByref : 1; // This is a compiler temporary of TYP_BYREF that is known to point into our local // stack frame. unsigned char lvHasILStoreOp : 1; // there is at least one STLOC or STARG on this local unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local unsigned char lvIsTemp : 1; // Short-lifetime compiler temp #if defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref. #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) unsigned char lvIsBoolean : 1; // set if variable is boolean unsigned char lvSingleDef : 1; // variable has a single def // before lvaMarkLocalVars: identifies ref type locals that can get type updates // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate // Currently, this is only used to decide if an EH variable can be // a register candiate or not. unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variable that are disqualified from register // candidancy unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan) // and is spilled making it candidate to spill right after the // first (and only) definition. // Note: We cannot reuse lvSingleDefRegCandidate because it is set // in earlier phase and the information might not be appropriate // in LSRA. unsigned char lvDisqualify : 1; // variable is no longer OK for add copy optimization unsigned char lvVolatileHint : 1; // hint for AssertionProp #ifndef TARGET_64BIT unsigned char lvStructDoubleAlign : 1; // Must we double align this struct? #endif // !TARGET_64BIT #ifdef TARGET_64BIT unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long #endif #ifdef DEBUG unsigned char lvKeepType : 1; // Don't change the type of this variable unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one #endif unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security // checks) unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks? unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a // 32-bit target. 
For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether // references to the arg are being rewritten as references to a promoted shadow local. unsigned char lvIsStructField : 1; // Is this local var a field of a promoted struct local? unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields unsigned char lvContainsHoles : 1; // True when we have a promoted struct that contains holes unsigned char lvCustomLayout : 1; // True when this struct has "CustomLayout" unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call #ifdef FEATURE_HFA_FIELDS_PRESENT CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA). #endif // FEATURE_HFA_FIELDS_PRESENT #ifdef DEBUG // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type. // Consider cleaning this up so this workaround is not required. unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals. // I.e. there is no longer any reference to the struct directly. // In this case we can simply remove this struct local. unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no // reference to the fields of this struct. #endif unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes #ifdef FEATURE_SIMD // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*. unsigned char lvSIMDType : 1; // This is a SIMD struct unsigned char lvUsedInSIMDIntrinsic : 1; // This tells lclvar is used for simd intrinsic unsigned char lvSimdBaseJitType : 5; // Note: this only packs because CorInfoType has less than 32 entries CorInfoType GetSimdBaseJitType() const { return (CorInfoType)lvSimdBaseJitType; } void SetSimdBaseJitType(CorInfoType simdBaseJitType) { assert(simdBaseJitType < (1 << 5)); lvSimdBaseJitType = (unsigned char)simdBaseJitType; } var_types GetSimdBaseType() const; #endif // FEATURE_SIMD unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct. unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type #ifdef DEBUG unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness #endif unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc, // eh) unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in // the prolog. If the local has gc pointers, there are no gc-safe points // between the prolog and the explicit initialization. union { unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct // local. For implicit byref parameters, this gets hijacked between // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the // struct local created to model the parameter's struct promotion, if any. 
unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local). // Valid on promoted struct local fields. }; unsigned char lvFieldCnt; // Number of fields in the promoted VarDsc. unsigned char lvFldOffset; unsigned char lvFldOrdinal; #ifdef DEBUG unsigned char lvSingleDefDisqualifyReason = 'H'; #endif #if FEATURE_MULTIREG_ARGS regNumber lvRegNumForSlot(unsigned slotNum) { if (slotNum == 0) { return (regNumber)_lvArgReg; } else if (slotNum == 1) { return GetOtherArgReg(); } else { assert(false && "Invalid slotNum!"); } unreached(); } #endif // FEATURE_MULTIREG_ARGS CorInfoHFAElemType GetLvHfaElemKind() const { #ifdef FEATURE_HFA_FIELDS_PRESENT return _lvHfaElemKind; #else NOWAY_MSG("GetLvHfaElemKind"); return CORINFO_HFA_ELEM_NONE; #endif // FEATURE_HFA_FIELDS_PRESENT } void SetLvHfaElemKind(CorInfoHFAElemType elemKind) { #ifdef FEATURE_HFA_FIELDS_PRESENT _lvHfaElemKind = elemKind; #else NOWAY_MSG("SetLvHfaElemKind"); #endif // FEATURE_HFA_FIELDS_PRESENT } bool lvIsHfa() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetLvHfaElemKind()); } else { return false; } } bool lvIsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return lvIsRegArg && lvIsHfa(); } else { return false; } } //------------------------------------------------------------------------------ // lvHfaSlots: Get the number of slots used by an HFA local // // Return Value: // On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA // On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8 // unsigned lvHfaSlots() const { assert(lvIsHfa()); assert(varTypeIsStruct(lvType)); unsigned slots = 0; #ifdef TARGET_ARM slots = lvExactSize / sizeof(float); assert(slots <= 8); #elif defined(TARGET_ARM64) switch (GetLvHfaElemKind()) { case CORINFO_HFA_ELEM_NONE: assert(!"lvHfaSlots called for non-HFA"); break; case CORINFO_HFA_ELEM_FLOAT: assert((lvExactSize % 4) == 0); slots = lvExactSize >> 2; break; case CORINFO_HFA_ELEM_DOUBLE: case CORINFO_HFA_ELEM_VECTOR64: assert((lvExactSize % 8) == 0); slots = lvExactSize >> 3; break; case CORINFO_HFA_ELEM_VECTOR128: assert((lvExactSize % 16) == 0); slots = lvExactSize >> 4; break; default: unreached(); } assert(slots <= 4); #endif // TARGET_ARM64 return slots; } // lvIsMultiRegArgOrRet() // returns true if this is a multireg LclVar struct used in an argument context // or if this is a multireg LclVar struct assigned from a multireg call bool lvIsMultiRegArgOrRet() { return lvIsMultiRegArg || lvIsMultiRegRet; } #if defined(DEBUG) private: DoNotEnregisterReason m_doNotEnregReason; AddressExposedReason m_addrExposedReason; public: void SetDoNotEnregReason(DoNotEnregisterReason reason) { m_doNotEnregReason = reason; } DoNotEnregisterReason GetDoNotEnregReason() const { return m_doNotEnregReason; } AddressExposedReason GetAddrExposedReason() const { return m_addrExposedReason; } #endif // DEBUG public: void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason)) { m_addrExposed = value; INDEBUG(m_addrExposedReason = reason); } void CleanAddressExposed() { m_addrExposed = false; } bool IsAddressExposed() const { return m_addrExposed; } private: regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a // register pair). It is set during codegen any time the // variable is enregistered (lvRegister is only set // to non-zero if the variable gets the same register assignment for its entire // lifetime). 
#if !defined(TARGET_64BIT) regNumberSmall _lvOtherReg; // Used for "upper half" of long var. #endif // !defined(TARGET_64BIT) regNumberSmall _lvArgReg; // The (first) register in which this argument is passed. #if FEATURE_MULTIREG_ARGS regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register. // Note this is defined but not used by ARM32 #endif // FEATURE_MULTIREG_ARGS regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry public: // The register number is stored in a small format (8 bits), but the getters return and the setters take // a full-size (unsigned) format, to localize the casts here. ///////////////////// regNumber GetRegNum() const { return (regNumber)_lvRegNum; } void SetRegNum(regNumber reg) { _lvRegNum = (regNumberSmall)reg; assert(_lvRegNum == reg); } ///////////////////// #if defined(TARGET_64BIT) regNumber GetOtherReg() const { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings return REG_NA; } void SetOtherReg(regNumber reg) { assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072 // "unreachable code" warnings } #else // !TARGET_64BIT regNumber GetOtherReg() const { return (regNumber)_lvOtherReg; } void SetOtherReg(regNumber reg) { _lvOtherReg = (regNumberSmall)reg; assert(_lvOtherReg == reg); } #endif // !TARGET_64BIT ///////////////////// regNumber GetArgReg() const { return (regNumber)_lvArgReg; } void SetArgReg(regNumber reg) { _lvArgReg = (regNumberSmall)reg; assert(_lvArgReg == reg); } #if FEATURE_MULTIREG_ARGS regNumber GetOtherArgReg() const { return (regNumber)_lvOtherArgReg; } void SetOtherArgReg(regNumber reg) { _lvOtherArgReg = (regNumberSmall)reg; assert(_lvOtherArgReg == reg); } #endif // FEATURE_MULTIREG_ARGS #ifdef FEATURE_SIMD // Is this is a SIMD struct? bool lvIsSIMDType() const { return lvSIMDType; } // Is this is a SIMD struct which is used for SIMD intrinsic? bool lvIsUsedInSIMDIntrinsic() const { return lvUsedInSIMDIntrinsic; } #else // If feature_simd not enabled, return false bool lvIsSIMDType() const { return false; } bool lvIsUsedInSIMDIntrinsic() const { return false; } #endif ///////////////////// regNumber GetArgInitReg() const { return (regNumber)_lvArgInitReg; } void SetArgInitReg(regNumber reg) { _lvArgInitReg = (regNumberSmall)reg; assert(_lvArgInitReg == reg); } ///////////////////// bool lvIsRegCandidate() const { return lvLRACandidate != 0; } bool lvIsInReg() const { return lvIsRegCandidate() && (GetRegNum() != REG_STK); } regMaskTP lvRegMask() const { regMaskTP regMask = RBM_NONE; if (varTypeUsesFloatReg(TypeGet())) { if (GetRegNum() != REG_STK) { regMask = genRegMaskFloat(GetRegNum(), TypeGet()); } } else { if (GetRegNum() != REG_STK) { regMask = genRegMask(GetRegNum()); } } return regMask; } unsigned short lvVarIndex; // variable tracking index private: unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference // parameters, this gets hijacked from fgResetImplicitByRefRefCount // through fgMarkDemotedImplicitByRefArgs, to provide a static // appearance count (computed during address-exposed analysis) // that fgMakeOutgoingStructArgCopy consults during global morph // to determine if eliding its copy is legal. 
weight_t m_lvRefCntWtd; // weighted reference count public: unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const; void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL); void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL); weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const; void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL); void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL); private: int lvStkOffs; // stack offset of home in bytes. public: int GetStackOffset() const { return lvStkOffs; } void SetStackOffset(int offset) { lvStkOffs = offset; } unsigned lvExactSize; // (exact) size of the type in bytes // Is this a promoted struct? // This method returns true only for structs (including SIMD structs), not for // locals that are split on a 32-bit target. // It is only necessary to use this: // 1) if only structs are wanted, and // 2) if Lowering has already been done. // Otherwise lvPromoted is valid. bool lvPromotedStruct() { #if !defined(TARGET_64BIT) return (lvPromoted && !varTypeIsLong(lvType)); #else // defined(TARGET_64BIT) return lvPromoted; #endif // defined(TARGET_64BIT) } unsigned lvSize() const; size_t lvArgStackSize() const; unsigned lvSlotNum; // original slot # (if remapped) typeInfo lvVerTypeInfo; // type info needed for verification // class handle for the local or null if not known or not a class, // for a struct handle use `GetStructHnd()`. CORINFO_CLASS_HANDLE lvClassHnd; // Get class handle for a struct local or implicitByRef struct local. CORINFO_CLASS_HANDLE GetStructHnd() const { #ifdef FEATURE_SIMD if (lvSIMDType && (m_layout == nullptr)) { return NO_CLASS_HANDLE; } #endif assert(m_layout != nullptr); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF))); #else assert(varTypeIsStruct(TypeGet())); #endif CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle(); assert(structHnd != NO_CLASS_HANDLE); return structHnd; } CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields private: ClassLayout* m_layout; // layout info for structs public: BlockSet lvRefBlks; // Set of blocks that contain refs Statement* lvDefStmt; // Pointer to the statement with the single definition void lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies var_types TypeGet() const { return (var_types)lvType; } bool lvStackAligned() const { assert(lvIsStructField); return ((lvFldOffset % TARGET_POINTER_SIZE) == 0); } bool lvNormalizeOnLoad() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. (lvIsParam || m_addrExposed || lvIsStructField); } bool lvNormalizeOnStore() const { return varTypeIsSmall(TypeGet()) && // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore. !(lvIsParam || m_addrExposed || lvIsStructField); } void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true); var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { assert(lvIsHfa()); return HfaTypeFromElemKind(GetLvHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type) { if (GlobalJitOptions::compFeatureHfa) { CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetLvHfaElemKind(elemKind); // Ensure we've allocated enough bits. 
assert(GetLvHfaElemKind() == elemKind); } } // Returns true if this variable contains GC pointers (including being a GC pointer itself). bool HasGCPtr() const { return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr()); } // Returns the layout of a struct variable. ClassLayout* GetLayout() const { assert(varTypeIsStruct(lvType)); return m_layout; } // Sets the layout of a struct variable. void SetLayout(ClassLayout* layout) { assert(varTypeIsStruct(lvType)); assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout)); m_layout = layout; } SsaDefArray<LclSsaVarDsc> lvPerSsaData; // Returns the address of the per-Ssa data for the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). LclSsaVarDsc* GetPerSsaData(unsigned ssaNum) { return lvPerSsaData.GetSsaDef(ssaNum); } // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition // of this variable. unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef) { return lvPerSsaData.GetSsaNum(ssaDef); } var_types GetRegisterType(const GenTreeLclVarCommon* tree) const; var_types GetRegisterType() const; var_types GetActualRegisterType() const; bool IsEnregisterableType() const { return GetRegisterType() != TYP_UNDEF; } bool IsEnregisterableLcl() const { if (lvDoNotEnregister) { return false; } return IsEnregisterableType(); } //----------------------------------------------------------------------------- // IsAlwaysAliveInMemory: Determines if this variable's value is always // up-to-date on stack. This is possible if this is an EH-var or // we decided to spill after single-def. // bool IsAlwaysAliveInMemory() const { return lvLiveInOutOfHndlr || lvSpillAtSingleDef; } bool CanBeReplacedWithItsField(Compiler* comp) const; #ifdef DEBUG public: const char* lvReason; void PrintVarReg() const { printf("%s", getRegName(GetRegNum())); } #endif // DEBUG }; // class LclVarDsc enum class SymbolicIntegerValue : int32_t { LongMin, IntMin, ShortMin, ByteMin, Zero, One, ByteMax, UByteMax, ShortMax, UShortMax, IntMax, UIntMax, LongMax, }; inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) > static_cast<int32_t>(right); } inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) >= static_cast<int32_t>(right); } inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) < static_cast<int32_t>(right); } inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right) { return static_cast<int32_t>(left) <= static_cast<int32_t>(right); } // Represents an integral range useful for reasoning about integral casts. // It uses a symbolic representation for lower and upper bounds so // that it can efficiently handle integers of all sizes on all hosts. // // Note that the ranges represented by this class are **always** in the // "signed" domain. This is so that if we know the range a node produces, it // can be trivially used to determine if a cast above the node does or does not // overflow, which requires that the interpretation of integers be the same both // for the "input" and "output". We choose signed interpretation here because it // produces nice continuous ranges and because IR uses sign-extension for constants. // // Some examples of how ranges are computed for casts: // 1. 
CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the
//    same range - all casts that do not change the representation, i.e. have the same
//    "actual" input and output type, have the same "input" and "output" range.
// 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX]
//    (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32
//    bit integers zero-extended to 64 bits).
// 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0
//    when interpreting as signed => the "input" range is [0..INT_MAX], the same range
//    being the produced one as the node does not change the width of the integer.
//
class IntegralRange
{
private:
    SymbolicIntegerValue m_lowerBound;
    SymbolicIntegerValue m_upperBound;

public:
    IntegralRange() = default;

    IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound)
        : m_lowerBound(lowerBound), m_upperBound(upperBound)
    {
        assert(lowerBound <= upperBound);
    }

    bool Contains(int64_t value) const;

    bool Contains(IntegralRange other) const
    {
        return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound);
    }

    bool IsPositive()
    {
        return m_lowerBound >= SymbolicIntegerValue::Zero;
    }

    bool Equals(IntegralRange other) const
    {
        return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound);
    }

    static int64_t SymbolicToRealValue(SymbolicIntegerValue value);
    static SymbolicIntegerValue LowerBoundForType(var_types type);
    static SymbolicIntegerValue UpperBoundForType(var_types type);

    static IntegralRange ForType(var_types type)
    {
        return {LowerBoundForType(type), UpperBoundForType(type)};
    }

    static IntegralRange ForNode(GenTree* node, Compiler* compiler);
    static IntegralRange ForCastInput(GenTreeCast* cast);
    static IntegralRange ForCastOutput(GenTreeCast* cast);

#ifdef DEBUG
    static void Print(IntegralRange range);
#endif // DEBUG
};

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           TempsInfo                                       XX
XX                                                                           XX
XX  The temporary lclVars allocated by the compiler for code generation      XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

/*****************************************************************************
 *
 *  The following keeps track of temporaries allocated in the stack frame
 *  during code-generation (after register allocation). These spill-temps are
 *  only used if we run out of registers while evaluating a tree.
 *
 *  These are different from the more common temps allocated by lvaGrabTemp().
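 *
 *  (Illustrative summary: a TempDsc records only the temp's number, size, type
 *  and eventual frame offset; see the TempDsc class below.)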
*/ class TempDsc { public: TempDsc* tdNext; private: int tdOffs; #ifdef DEBUG static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG #endif // DEBUG int tdNum; BYTE tdSize; var_types tdType; public: TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType) { #ifdef DEBUG // temps must have a negative number (so they have a different number from all local variables) assert(tdNum < 0); tdOffs = BAD_TEMP_OFFSET; #endif // DEBUG if (tdNum != _tdNum) { IMPL_LIMITATION("too many spill temps"); } } #ifdef DEBUG bool tdLegalOffset() const { return tdOffs != BAD_TEMP_OFFSET; } #endif // DEBUG int tdTempOffs() const { assert(tdLegalOffset()); return tdOffs; } void tdSetTempOffs(int offs) { tdOffs = offs; assert(tdLegalOffset()); } void tdAdjustTempOffs(int offs) { tdOffs += offs; assert(tdLegalOffset()); } int tdTempNum() const { assert(tdNum < 0); return tdNum; } unsigned tdTempSize() const { return tdSize; } var_types tdTempType() const { return tdType; } }; // interface to hide linearscan implementation from rest of compiler class LinearScanInterface { public: virtual void doLinearScan() = 0; virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0; virtual bool willEnregisterLocalVars() const = 0; #if TRACK_LSRA_STATS virtual void dumpLsraStatsCsv(FILE* file) = 0; virtual void dumpLsraStatsSummary(FILE* file) = 0; #endif // TRACK_LSRA_STATS }; LinearScanInterface* getLinearScanAllocator(Compiler* comp); // Information about arrays: their element type and size, and the offset of the first element. // We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes, // associate an array info via the map retrieved by GetArrayInfoMap(). This information is used, // for example, in value numbering of array index expressions. struct ArrayInfo { var_types m_elemType; CORINFO_CLASS_HANDLE m_elemStructType; unsigned m_elemSize; unsigned m_elemOffset; ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0) { } ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType) : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset) { } }; // This enumeration names the phases into which we divide compilation. The phases should completely // partition a compilation. enum Phases { #define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm, #include "compphases.h" PHASE_NUMBER_OF }; extern const char* PhaseNames[]; extern const char* PhaseEnums[]; extern const LPCWSTR PhaseShortNames[]; // Specify which checks should be run after each phase // enum class PhaseChecks { CHECK_NONE, CHECK_ALL }; // Specify compiler data that a phase might modify enum class PhaseStatus : unsigned { MODIFIED_NOTHING, MODIFIED_EVERYTHING }; // The following enum provides a simple 1:1 mapping to CLR API's enum API_ICorJitInfo_Names { #define DEF_CLR_API(name) API_##name, #include "ICorJitInfo_API_names.h" API_COUNT }; //--------------------------------------------------------------- // Compilation time. // // A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods. // We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles // of the compilation, as well as the cycles for each phase. We also track the number of bytecodes. 
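// (Per-phase data lands in the m_invokesByPhase/m_cyclesByPhase arrays below, indexed
// by the Phases enum.)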
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated
// by "m_timerFailure" being true.
// If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile.
struct CompTimeInfo
{
#ifdef FEATURE_JIT_METHOD_PERF
    // The string names of the phases.
    static const char* PhaseNames[];

    static bool PhaseHasChildren[];
    static int  PhaseParent[];
    static bool PhaseReportsIRSize[];

    unsigned         m_byteCodeBytes;
    unsigned __int64 m_totalCycles;
    unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF];
    unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF];
#if MEASURE_CLRAPI_CALLS
    unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF];
    unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF];
#endif

    unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF];

    // For better documentation, we call EndPhase on
    // non-leaf phases. We should also call EndPhase on the
    // last leaf subphase; obviously, the elapsed cycles between the EndPhase
    // for the last leaf subphase and the EndPhase for an ancestor should be very small.
    // We add all such "redundant end phase" intervals to this variable below; we print
    // it out in a report, so we can verify that it is, indeed, very small. If it ever
    // isn't, this means that we're doing something significant between the end of the last
    // declared subphase and the end of its parent.
    unsigned __int64 m_parentPhaseEndSlop;
    bool             m_timerFailure;

#if MEASURE_CLRAPI_CALLS
    // The following measures the time spent inside each individual CLR API call.
    unsigned         m_allClrAPIcalls;
    unsigned         m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT];
    unsigned __int64 m_allClrAPIcycles;
    unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
    unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
#endif // MEASURE_CLRAPI_CALLS

    CompTimeInfo(unsigned byteCodeBytes);
#endif
};

#ifdef FEATURE_JIT_METHOD_PERF

#if MEASURE_CLRAPI_CALLS
struct WrapICorJitInfo;
#endif

// This class summarizes the JIT time information over the course of a run: the number of methods compiled,
// and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above).
// The operation of adding a single method's timing to the summary may be performed concurrently by several
// threads, so it is protected by a lock.
// This class is intended to be used as a singleton type, with only a single instance.
class CompTimeSummaryInfo
{
    // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one).
    static CritSecObject s_compTimeSummaryLock;

    int          m_numMethods;
    int          m_totMethods;
    CompTimeInfo m_total;
    CompTimeInfo m_maximum;

    int          m_numFilteredMethods;
    CompTimeInfo m_filtered;

    // This can use whatever data you want to determine if the value to be added
    // belongs in the filtered section (it's always included in the unfiltered section)
    bool IncludedInFilteredData(CompTimeInfo& info);

public:
    // This is the unique CompTimeSummaryInfo object for this instance of the runtime.
    static CompTimeSummaryInfo s_compTimeSummary;

    CompTimeSummaryInfo()
        : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0)
    {
    }

    // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary.
    // This is thread safe.
    void AddInfo(CompTimeInfo& info, bool includePhases);

    // Print the summary information to "f".
    // This is not thread-safe; assumed to be called by only one thread.
    void Print(FILE* f);
};

// A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation,
// and when the current phase started. This is intended to be part of a Compilation object.
//
class JitTimer
{
    unsigned __int64 m_start;         // Start of the compilation.
    unsigned __int64 m_curPhaseStart; // Start of the current phase.
#if MEASURE_CLRAPI_CALLS
    unsigned __int64 m_CLRcallStart;   // Start of the current CLR API call (if any).
    unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far
    unsigned __int64 m_CLRcallCycles;  // CLR API cycles under current outer so far.
    int              m_CLRcallAPInum;  // The enum/index of the current CLR API call (or -1).
    static double    s_cyclesPerSec;   // Cached for speedier measurements
#endif
#ifdef DEBUG
    Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
#endif
    CompTimeInfo m_info; // The CompTimeInfo for this compilation.

    static CritSecObject s_csvLock; // Lock to protect the time log file.
    static FILE*         s_csvFile; // The time log file handle.

    void PrintCsvMethodStats(Compiler* comp);

private:
    void* operator new(size_t);
    void* operator new[](size_t);
    void operator delete(void*);
    void operator delete[](void*);

public:
    // Initializes the timer instance.
    JitTimer(unsigned byteCodeSize);

    static JitTimer* Create(Compiler* comp, unsigned byteCodeSize)
    {
        return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize);
    }

    static void PrintCsvHeader();

    // Ends the current phase (argument is for a redundant check).
    void EndPhase(Compiler* compiler, Phases phase);

#if MEASURE_CLRAPI_CALLS
    // Start and end a timed CLR API call.
    void CLRApiCallEnter(unsigned apix);
    void CLRApiCallLeave(unsigned apix);
#endif // MEASURE_CLRAPI_CALLS

    // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode,
    // and adds it to "sum".
    void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases);

    // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets
    // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of
    // "m_info" to true.
    bool GetThreadCycles(unsigned __int64* cycles)
    {
        bool res = CycleTimer::GetThreadCyclesS(cycles);
        if (!res)
        {
            m_info.m_timerFailure = true;
        }
        return res;
    }

    static void Shutdown();
};
#endif // FEATURE_JIT_METHOD_PERF

//------------------- Function/Funclet info -------------------------------
enum FuncKind : BYTE
{
    FUNC_ROOT,    // The main/root function (always id==0)
    FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
    FUNC_FILTER,  // a funclet associated with an EH filter
    FUNC_COUNT
};

class emitLocation;

struct FuncInfoDsc
{
    FuncKind       funKind;
    BYTE           funFlags;   // Currently unused, just here for padding
    unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
                               // funclet. It is only valid if funKind field indicates this is a
                               // EH-related funclet: FUNC_HANDLER or FUNC_FILTER

#if defined(TARGET_AMD64)

    // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array.
    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;
    UNWIND_INFO   unwindHeader;
    // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd
    // number of codes, the VM or Zapper will 4-byte align the whole thing.
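    // The buffer below is sized for that worst case (255 codes plus the header);
    // unwindCodeSlot tracks the current position within it.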
    BYTE     unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
    unsigned unwindCodeSlot;

#elif defined(TARGET_X86)

    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;

#elif defined(TARGET_ARMARCH)

    UnwindInfo  uwi;     // Unwind information for this function/funclet's hot section
    UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
                         // Note: we only have a pointer here instead of the actual object,
                         // to save memory in the JIT case (compared to the NGEN case),
                         // where we don't have any cold section.
                         // Note 2: we currently don't support hot/cold splitting in functions
                         // with EH, so uwiCold will be NULL for all funclets.

    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;

#endif // TARGET_ARMARCH

#if defined(FEATURE_CFI_SUPPORT)
    jitstd::vector<CFI_CODE>* cfiCodes;
#endif // FEATURE_CFI_SUPPORT

    // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
    // that isn't shared between the main function body and funclets.
};

struct fgArgTabEntry
{
    GenTreeCall::Use* use;     // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg.
    GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any.

    // Get the node that corresponds to this argument entry.
    // This is the "real" node and not a placeholder or setup node.
    GenTree* GetNode() const
    {
        return lateUse == nullptr ? use->GetNode() : lateUse->GetNode();
    }

    unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL

private:
    regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for
                                               // arguments passed on the stack

public:
    unsigned numRegs; // Count of number of registers that this argument uses.
                      // Note that on ARM, if we have a double hfa, this reflects the number
                      // of DOUBLE registers.

#if defined(UNIX_AMD64_ABI)
    // Unix amd64 will split floating point types and integer types in structs
    // between floating point and general purpose registers. Keep track of that
    // information so we do not need to recompute it later.
    unsigned structIntRegs;
    unsigned structFloatRegs;
#endif // UNIX_AMD64_ABI

#if defined(DEBUG_ARG_SLOTS)
    // These fields were used to calculate stack size in stack slots for arguments
    // but now they are replaced by precise `m_byteOffset/m_byteSize` because of
    // arm64 apple abi requirements.

    // A slot is a pointer sized region in the OutArg area.
    unsigned slotNum;  // When an argument is passed in the OutArg area this is the slot number in the OutArg area
    unsigned numSlots; // Count of number of slots that this argument uses
#endif // DEBUG_ARG_SLOTS

    // Return number of stack slots that this argument is taking.
    // TODO-Cleanup: this function does not align with the arm64 apple model;
    // delete it. In most cases we just want to know if the arg is using the stack or not,
    // but in some cases we are checking if it is a multireg arg, like:
    // `numRegs + GetStackSlotsNumber() > 1` that is harder to replace.
    //
    unsigned GetStackSlotsNumber() const
    {
        return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
    }

private:
    unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
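                          // (Queried via isLateArg()/GetLateArgInx() below.)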
public:
    unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg

    var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
                       // struct is passed as a scalar type, this is that type.
                       // Note that if a struct is passed by reference, this will still be the struct type.

    bool needTmp : 1;      // True when we force this argument's evaluation into a temp LclVar
    bool needPlace : 1;    // True when we must replace this argument with a placeholder node
    bool isTmp : 1;        // True when we set up a temp LclVar for this argument due to size issues with the struct
    bool processed : 1;    // True when we have decided the evaluation order for this argument in the gtCallLateArgs
    bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
                           // previous arguments.

    NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
                                               // to be in certain registers or on the stack, regardless of where they
                                               // appear in the arg list.

    bool isStruct : 1;    // True if this is a struct arg
    bool _isVararg : 1;   // True if the argument is in a vararg context.
    bool passedByRef : 1; // True iff the argument is passed by reference.
#if FEATURE_ARG_SPLIT
    bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif                 // FEATURE_ARG_SPLIT

#ifdef FEATURE_HFA_FIELDS_PRESENT
    CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif

    CorInfoHFAElemType GetHfaElemKind() const
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        return _hfaElemKind;
#else
        NOWAY_MSG("GetHfaElemKind");
        return CORINFO_HFA_ELEM_NONE;
#endif
    }

    void SetHfaElemKind(CorInfoHFAElemType elemKind)
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        _hfaElemKind = elemKind;
#else
        NOWAY_MSG("SetHfaElemKind");
#endif
    }

    bool isNonStandard() const
    {
        return nonStandardArgKind != NonStandardArgKind::None;
    }

    // Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
    // In this case, it must be removed by GenTreeCall::ResetArgInfo.
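    // For example (as the switch below encodes): NonStandardArgKind::R2RIndirectionCell
    // is added late by fgInitArgInfo (returns true), while NonStandardArgKind::FixedRetBuffer
    // is not (returns false).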
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
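                // (Hence the ARM-only halving below when the HFA element type is TYP_DOUBLE.)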
                unsigned numHfaRegs = hfaSlots;

#ifdef TARGET_ARM
                if (type == TYP_DOUBLE)
                {
                    // Must be an even number of registers.
                    assert((numRegs & 1) == 0);
                    numHfaRegs = hfaSlots / 2;
                }
#endif // TARGET_ARM

                if (!IsHfaArg())
                {
                    // We haven't previously set this; do so now.
                    CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
                    SetHfaElemKind(elemKind);
                    // Ensure we've allocated enough bits.
                    assert(GetHfaElemKind() == elemKind);
                    if (isPassedInRegisters())
                    {
                        numRegs = numHfaRegs;
                    }
                }
                else
                {
                    // We've already set this; ensure that it's consistent.
                    if (isPassedInRegisters())
                    {
                        assert(numRegs == numHfaRegs);
                    }
                    assert(type == HfaTypeFromElemKind(GetHfaElemKind()));
                }
            }
        }
    }

#ifdef TARGET_ARM
    void SetIsBackFilled(bool backFilled)
    {
        isBackFilled = backFilled;
    }

    bool IsBackFilled() const
    {
        return isBackFilled;
    }
#else  // !TARGET_ARM
    void SetIsBackFilled(bool backFilled)
    {
    }

    bool IsBackFilled() const
    {
        return false;
    }
#endif // !TARGET_ARM

    bool isPassedInRegisters() const
    {
        return !IsSplit() && (numRegs != 0);
    }

    bool isPassedInFloatRegisters() const
    {
#ifdef TARGET_X86
        return false;
#else
        return isValidFloatArgReg(GetRegNum());
#endif
    }

    // Can we replace the struct type of this node with a primitive type for argument passing?
    bool TryPassAsPrimitive() const
    {
        return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE));
    }

#if defined(DEBUG_ARG_SLOTS)
    // Returns the number of "slots" used, where for this purpose a
    // register counts as a slot.
    unsigned getSlotCount() const
    {
        if (isBackFilled)
        {
            assert(isPassedInRegisters());
            assert(numRegs == 1);
        }
        else if (GetRegNum() == REG_STK)
        {
            assert(!isPassedInRegisters());
            assert(numRegs == 0);
        }
        else
        {
            assert(numRegs > 0);
        }
        return numSlots + numRegs;
    }
#endif

#if defined(DEBUG_ARG_SLOTS)
    // Returns the size as a multiple of pointer-size.
    // For targets without HFAs, this is the same as getSlotCount().
    unsigned getSize() const
    {
        unsigned size = getSlotCount();
        if (GlobalJitOptions::compFeatureHfa)
        {
            if (IsHfaRegArg())
            {
#ifdef TARGET_ARM
                // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size.
                if (GetHfaType() == TYP_DOUBLE)
                {
                    assert(!IsSplit());
                    size <<= 1;
                }
#elif defined(TARGET_ARM64)
                // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size,
                // or if they are SIMD16 vector hfa regs we have to double the size.
                if (GetHfaType() == TYP_FLOAT)
                {
                    // Round up in case of odd HFA count.
                    size = (size + 1) >> 1;
                }
#ifdef FEATURE_SIMD
                else if (GetHfaType() == TYP_SIMD16)
                {
                    size <<= 1;
                }
#endif // FEATURE_SIMD
#endif // TARGET_ARM64
            }
        }
        return size;
    }
#endif // DEBUG_ARG_SLOTS

private:
    unsigned m_byteOffset;

    // byte size that this argument takes including the padding after.
    // For example, a 1-byte arg on x64 with 8-byte alignment
    // will have `m_byteSize == 8`; the same arg on apple arm64 will have `m_byteSize == 1`.
    unsigned m_byteSize;

    unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers).

public:
    void SetByteOffset(unsigned byteOffset)
    {
        DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum);
        m_byteOffset = byteOffset;
    }

    unsigned GetByteOffset() const
    {
        DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum);
        return m_byteOffset;
    }

    void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa)
    {
        unsigned roundedByteSize;
        if (compMacOsArm64Abi())
        {
            // Only struct types need extension or rounding to pointer size, but HFA<float> does not.
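            // (So on the macOS arm64 ABI a float HFA keeps its exact byte size.)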
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
                          // Computed dynamically during codegen, based on stkSizeBytes and the current
                          // stack level (genStackLevel) when the first stack adjustment is made for
                          // this call.
#endif

#if FEATURE_FIXED_OUT_ARGS
    unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif

    unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
    bool     hasRegArgs;   // true if we have one or more register arguments
    bool     hasStackArgs; // true if we have one or more stack arguments
    bool     argsComplete; // marker for state
    bool     argsSorted;   // marker for state
    bool     needsTemps;   // one or more arguments must be copied to a temp by EvalArgsToTemps

    fgArgTabEntry** argTable; // variable sized array of per-argument descriptions (i.e. argTable[argTableSize])

private:
    void AddArg(fgArgTabEntry* curArgTabEntry);

public:
    fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
    fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);

    fgArgTabEntry* AddRegArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             regNumber         regNum,
                             unsigned          numRegs,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

#ifdef UNIX_AMD64_ABI
    fgArgTabEntry* AddRegArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             regNumber         regNum,
                             unsigned          numRegs,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             const bool        isStruct,
                             const bool        isFloatHfa,
                             const bool        isVararg,
                             const regNumber   otherRegNum,
                             const unsigned    structIntRegs,
                             const unsigned    structFloatRegs,
                             const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI

    fgArgTabEntry* AddStkArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             unsigned          numSlots,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

    void RemorphReset();
    void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
    void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);

    void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);

    void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);

    void ArgsComplete();

    void SortArgs();

    void EvalArgsToTemps();

    unsigned ArgCount() const
    {
        return argCount;
    }
    fgArgTabEntry** ArgTable() const
    {
        return argTable;
    }

#if defined(DEBUG_ARG_SLOTS)
    unsigned GetNextSlotNum() const
    {
        return nextSlotNum;
    }
#endif

    unsigned GetNextSlotByteOffset() const
    {
        return nextStackByteOffset;
    }

    bool HasRegArgs() const
    {
        return hasRegArgs;
    }
    bool NeedsTemps() const
    {
        return needsTemps;
    }
    bool HasStackArgs() const
    {
        return hasStackArgs;
    }
    bool AreArgsComplete() const
    {
        return argsComplete;
    }

#if FEATURE_FIXED_OUT_ARGS
    unsigned GetOutArgSize() const
    {
        return outArgSize;
    }
    void SetOutArgSize(unsigned newVal)
    {
        outArgSize = newVal;
    }
#endif // FEATURE_FIXED_OUT_ARGS

#if defined(UNIX_X86_ABI)
    void ComputeStackAlignment(unsigned curStackLevelInBytes)
    {
        padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
    }

    unsigned GetStkAlign() const
    {
        return padStkAlign;
    }
    void SetStkSizeBytes(unsigned newStkSizeBytes)
    {
        stkSizeBytes = newStkSizeBytes;
    }
    unsigned GetStkSizeBytes() const
    {
        return stkSizeBytes;
    }
    bool IsStkAlignmentDone() const
    {
        return alignmentDone;
    }
    void SetStkAlignmentDone()
    {
        alignmentDone = true;
    }
#endif // defined(UNIX_X86_ABI)

    // Get the fgArgTabEntry for the arg at position argNum.
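    // Note: before the table has been sorted (reMorphing == false) the entry can be
    // indexed directly; once sorted, a linear search on argNum is required, as the
    // body below shows.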
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
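    // (These values are bit flags; they are combined and tested with the LoopFlags
    // operators defined immediately below, e.g. `flags |= LPFLG_CONST_INIT;`.)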
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.
    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree*             op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //

    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating format of EH table
    //

    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // the next unused (available) node in the preallocated EHnode block.

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
                                // the last IL offset, not "one past the last one", i.e., the range Start to End is
                                // inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
unsigned ehGetIndex(EHblkDsc* ehDsc); // Return the EH descriptor index of the enclosing try, for the given region index. unsigned ehGetEnclosingTryIndex(unsigned regionIndex); // Return the EH descriptor index of the enclosing handler, for the given region index. unsigned ehGetEnclosingHndIndex(unsigned regionIndex); // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this // block is not in a 'try' region). EHblkDsc* ehGetBlockTryDsc(BasicBlock* block); // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr // if this block is not in a filter or handler region). EHblkDsc* ehGetBlockHndDsc(BasicBlock* block); // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or // nullptr if this block's exceptions propagate to caller). EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block); EHblkDsc* ehIsBlockTryLast(BasicBlock* block); EHblkDsc* ehIsBlockHndLast(BasicBlock* block); bool ehIsBlockEHLast(BasicBlock* block); bool ehBlockHasExnFlowDsc(BasicBlock* block); // Return the region index of the most nested EH region this block is in. unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion); // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check. unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex); // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler. // (It can never be a filter.) unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion); // A block has been deleted. Update the EH table appropriately. void ehUpdateForDeletedBlock(BasicBlock* block); // Determine whether a block can be deleted while preserving the EH normalization rules. bool ehCanDeleteEmptyBlock(BasicBlock* block); // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region. void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast); // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler, // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if lives in the handler region. (It never // lives in a filter.) unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion); // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex' region's // handler. Set begBlk to the first block, and endBlk to the block after the last block of the range // (nullptr if the last block is the last block in the program). // Precondition: 'finallyIndex' is the EH region of a try/finally clause. 
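    // Illustrative (hypothetical) use:
    //     BasicBlock* beg;
    //     BasicBlock* end;
    //     ehGetCallFinallyBlockRange(finallyIndex, &beg, &end);
    //     for (BasicBlock* blk = beg; blk != end; blk = blk->bbNext) { ... }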
    void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);

#ifdef DEBUG
    // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
    // 'true' if the BBJ_CALLFINALLY is in the correct EH region.
    bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG

#if defined(FEATURE_EH_FUNCLETS)
    // Do we need a PSPSym in the main function? For codegen purposes, we only need one
    // if there is a filter that protects a region with a nested EH clause (such as a
    // try/catch nested in the 'try' body of a try/filter/filter-handler). See
    // genFuncletProlog() for more details. However, the VM seems to use it for more
    // purposes, maybe including debugging. Until we are sure otherwise, always create
    // a PSPSym for functions with any EH.
    bool ehNeedsPSPSym() const
    {
#ifdef TARGET_X86
        return false;
#else  // TARGET_X86
        return compHndBBtabCount > 0;
#endif // TARGET_X86
    }

    bool     ehAnyFunclets();  // Are there any funclets in this function?
    unsigned ehFuncletCount(); // Return the count of funclets in the function

    unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks

#else  // !FEATURE_EH_FUNCLETS

    bool ehAnyFunclets()
    {
        return false;
    }
    unsigned ehFuncletCount()
    {
        return 0;
    }

    unsigned bbThrowIndex(BasicBlock* blk)
    {
        return blk->bbTryIndex;
    } // Get the index to use as the cache key for sharing throw blocks

#endif // !FEATURE_EH_FUNCLETS

    // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
    // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the
    // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
    // for example, we want to consider that predecessor the immediate dominator of the catch clause start block,
    // so it's convenient to also consider it a predecessor.)
    flowList* BlockPredsWithEH(BasicBlock* blk);

    // This table is useful for memoization of the method above.
    typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
    BlockToFlowListMap* m_blockToEHPreds;
    BlockToFlowListMap* GetBlockToEHPreds()
    {
        if (m_blockToEHPreds == nullptr)
        {
            m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
        }
        return m_blockToEHPreds;
    }

    void* ehEmitCookie(BasicBlock* block);
    UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);

    EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);

    EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);

    EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);

    EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);

    void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);

    void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);

    void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);

    void fgSkipRmvdBlocks(EHblkDsc* handlerTab);

    void fgAllocEHTable();

    void fgRemoveEHTableEntry(unsigned XTnum);

#if defined(FEATURE_EH_FUNCLETS)

    EHblkDsc* fgAddEHTableEntry(unsigned XTnum);

#endif // FEATURE_EH_FUNCLETS

#if !FEATURE_EH
    void fgRemoveEH();
#endif // !FEATURE_EH

    void fgSortEHTable();

    // Causes the EH table to obey some well-formedness conditions, by inserting
    // empty BB's when necessary:
    //   * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
GenTree* op1, bool fromUnsigned, var_types castType);

GenTreeAllocObj* gtNewAllocObjNode(
    unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1);

GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent);

GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree);

GenTreeIndir* gtNewMethodTableLookup(GenTree* obj);

//------------------------------------------------------------------------
// Other GenTree functions

GenTree* gtClone(GenTree* tree, bool complexOK = false);

// If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise,
// create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with
// IntCnses with value `deepVarVal`.
GenTree* gtCloneExpr(
    GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal);

// Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local
// `varNum` to int constants with value `varVal`.
GenTree* gtCloneExpr(GenTree* tree, GenTreeFlags addFlags = GTF_EMPTY, unsigned varNum = BAD_VAR_NUM, int varVal = 0)
{
    return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal);
}

Statement* gtCloneStmt(Statement* stmt)
{
    GenTree* exprClone = gtCloneExpr(stmt->GetRootNode());
    return gtNewStmt(exprClone, stmt->GetDebugInfo());
}

// Internal helper for cloning a call
GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call,
                                   GenTreeFlags addFlags   = GTF_EMPTY,
                                   unsigned     deepVarNum = BAD_VAR_NUM,
                                   int          deepVarVal = 0);

// Create copy of an inline or guarded devirtualization candidate tree.
GenTreeCall* gtCloneCandidateCall(GenTreeCall* call);

void gtUpdateSideEffects(Statement* stmt, GenTree* tree);

void gtUpdateTreeAncestorsSideEffects(GenTree* tree);

void gtUpdateStmtSideEffects(Statement* stmt);

void gtUpdateNodeSideEffects(GenTree* tree);

void gtUpdateNodeOperSideEffects(GenTree* tree);

void gtUpdateNodeOperSideEffectsPost(GenTree* tree);

// Returns "true" iff the complexity (not formally defined, but first interpretation
// is #of nodes in subtree) of "tree" is greater than "limit".
// (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used
// before they have been set.)
bool gtComplexityExceeds(GenTree** tree, unsigned limit);

GenTree* gtReverseCond(GenTree* tree);

static bool gtHasRef(GenTree* tree, ssize_t lclNum);

bool gtHasLocalsWithAddrOp(GenTree* tree);

unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz);
unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp);

void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly);

#ifdef DEBUG
unsigned gtHashValue(GenTree* tree);

GenTree* gtWalkOpEffectiveVal(GenTree* op);
#endif

void gtPrepareCost(GenTree* tree);
bool gtIsLikelyRegVar(GenTree* tree);

// Returns true iff the secondNode can be swapped with firstNode.
bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode);
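// Example (illustrative sketch, not from the original header): the cloning
// helpers above can specialize a tree for a known local value; assuming 'tree'
// reads local number 'lclNum', this yields a copy in which those reads are
// replaced by the integer constant 42:
//
//     GenTree* specialized = gtCloneExpr(tree, GTF_EMPTY, lclNum, 42);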
// Given an address expression, compute its costs and addressing mode opportunities,
// and mark addressing mode candidates as GTF_DONT_CSE.
// TODO-Throughput - Consider actually instantiating these early, to avoid
// having to re-run the algorithm that looks for them (might also improve CQ).
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type);

unsigned gtSetEvalOrder(GenTree* tree);

void gtSetStmtInfo(Statement* stmt);

// Returns "true" iff "node" has any of the side effects in "flags".
bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags);

// Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags".
bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags);

// Appends 'expr' in front of 'list'
//    'list' will typically start off as 'nullptr'
//    when 'list' is non-null a GT_COMMA node is used to insert 'expr'
GenTree* gtBuildCommaList(GenTree* list, GenTree* expr);

void gtExtractSideEffList(GenTree*     expr,
                          GenTree**    pList,
                          GenTreeFlags flags      = GTF_SIDE_EFFECT,
                          bool         ignoreRoot = false);

GenTree* gtGetThisArg(GenTreeCall* call);

// Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the
// static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but
// complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing
// the given "fldHnd", is such an object pointer.
bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd);

// Return true if call is a recursive call; return false otherwise.
// Note when inlining, this looks for calls back to the root method.
bool gtIsRecursiveCall(GenTreeCall* call)
{
    return gtIsRecursiveCall(call->gtCallMethHnd);
}

bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle)
{
    return (callMethodHandle == impInlineRoot()->info.compMethodHnd);
}

//-------------------------------------------------------------------------

GenTree* gtFoldExpr(GenTree* tree);
GenTree* gtFoldExprConst(GenTree* tree);
GenTree* gtFoldExprSpecial(GenTree* tree);
GenTree* gtFoldBoxNullable(GenTree* tree);
GenTree* gtFoldExprCompare(GenTree* tree);
GenTree* gtCreateHandleCompare(genTreeOps             oper,
                               GenTree*               op1,
                               GenTree*               op2,
                               CorInfoInlineTypeCheck typeCheckInliningResult);
GenTree* gtFoldExprCall(GenTreeCall* call);
GenTree* gtFoldTypeCompare(GenTree* tree);
GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2);

// Options to control behavior of gtTryRemoveBoxUpstreamEffects
enum BoxRemovalOptions
{
    BR_REMOVE_AND_NARROW,                  // remove effects, minimize remaining work, return possibly narrowed source tree
    BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree
    BR_REMOVE_BUT_NOT_NARROW,              // remove effects, return original source tree
    BR_DONT_REMOVE,                        // check if removal is possible, return copy source tree
    BR_DONT_REMOVE_WANT_TYPE_HANDLE,       // check if removal is possible, return type handle tree
    BR_MAKE_LOCAL_COPY                     // revise box to copy to temp local and return local's address
};

GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW);
GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp);

//-------------------------------------------------------------------------
// Get the handle, if any.
CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree);
// Get the handle, and assert if not found.
CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree);
// Get the handle for a ref type.
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull);
// Get the class handle for a helper call
CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull);
// Get the element handle for an array of ref type.
CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array);
// Get a class handle from a helper call argument
CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array);
// Get the class handle for a field
CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull);
// Check if this tree is a gc static base helper call
bool gtIsStaticGCBaseHelperCall(GenTree* tree);

//-------------------------------------------------------------------------
// Functions to display the trees

#ifdef DEBUG
void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR);

void gtDispConst(GenTree* tree);
void gtDispLeaf(GenTree* tree, IndentStack* indentStack);
void gtDispNodeName(GenTree* tree);
#if FEATURE_MULTIREG_RET
unsigned gtDispMultiRegCount(GenTree* tree);
#endif
void gtDispRegVal(GenTree* tree);
void gtDispZeroFieldSeq(GenTree* tree);
void gtDispVN(GenTree* tree);
void gtDispCommonEndLine(GenTree* tree);

enum IndentInfo
{
    IINone,
    IIArc,
    IIArcTop,
    IIArcBottom,
    IIEmbedded,
    IIError,
    IndentInfoCount
};
void gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, _In_opt_ const char* msg = nullptr, bool topOnly = false);
void gtDispTree(GenTree* tree, IndentStack* indentStack = nullptr, _In_opt_ const char* msg = nullptr, bool topOnly = false, bool isLIR = false);
void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut);
int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining);
char* gtGetLclVarName(unsigned lclNum);
void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true);
void gtDispLclVarStructType(unsigned lclNum);
void gtDispClassLayout(ClassLayout* layout, var_types type);
void gtDispILLocation(const ILLocation& loc);
void gtDispStmt(Statement* stmt, const char* msg = nullptr);
void gtDispBlockStmts(BasicBlock* block);
void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength);
void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength);
void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
void gtDispFieldSeq(FieldSeqNode* pfsn);

void gtDispRange(LIR::ReadOnlyRange const& range);

void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree);

void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr);
#endif

// For tree walks

enum fgWalkResult
{
    WALK_CONTINUE,
    WALK_SKIP_SUBTREES,
    WALK_ABORT
};
struct fgWalkData;
typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data);
typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data);

static fgWalkPreFn gtMarkColonCond;
static fgWalkPreFn gtClearColonCond;

struct FindLinkData
{
    GenTree*  nodeToFind;
    GenTree** result;
    GenTree*  parent;
};

FindLinkData gtFindLink(Statement* stmt, GenTree* node);
bool gtHasCatchArg(GenTree* tree);

typedef ArrayStack<GenTree*> GenTreeStack;

static bool gtHasCallOnStack(GenTreeStack* parentStack);

//=========================================================================
// BasicBlock functions
#ifdef DEBUG
// This is a debug flag we will use to assert when creating block during codegen
// as this interferes with procedure splitting. If you know what you're doing, set
// it to true before creating the block. (DEBUG only)
bool fgSafeBasicBlockCreation;
#endif

BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind);

void placeLoopAlignInstructions();

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           LclVarsInfo                                     XX
XX                                                                           XX
XX   The variables to be used by the code generator.                         XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

//
// For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will
// be placed in the stack frame and its fields must be laid out sequentially.
//
// For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by
// a local variable that can be enregistered or placed in the stack frame.
// The fields do not need to be laid out sequentially
//
enum lvaPromotionType
{
    PROMOTION_TYPE_NONE,        // The struct local is not promoted
    PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted,
                                // and its field locals are independent of its parent struct local.
    PROMOTION_TYPE_DEPENDENT    // The struct local is promoted,
                                // but its field locals depend on its parent struct local.
};

/*****************************************************************************/

enum FrameLayoutState
{
    NO_FRAME_LAYOUT,
    INITIAL_FRAME_LAYOUT,
    PRE_REGALLOC_FRAME_LAYOUT,
    REGALLOC_FRAME_LAYOUT,
    TENTATIVE_FRAME_LAYOUT,
    FINAL_FRAME_LAYOUT
};

public:
RefCountState lvaRefCountState; // Current local ref count state

bool lvaLocalVarRefCounted() const
{
    return lvaRefCountState == RCS_NORMAL;
}

bool lvaTrackedFixed; // true: We cannot add new 'tracked' variables

unsigned lvaCount; // total number of locals, which includes function arguments,
                   // special arguments, IL local variables, and JIT temporary variables

LclVarDsc* lvaTable;    // variable descriptor table
unsigned   lvaTableCnt; // lvaTable size (>= lvaCount)

unsigned lvaTrackedCount;             // actual # of locals being tracked
unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked

#ifdef DEBUG
VARSET_TP lvaTrackedVars; // set of tracked variables
#endif
#ifndef TARGET_64BIT
VARSET_TP lvaLongVars; // set of long (64-bit) variables
#endif
VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables

unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices.
                      // If that changes, this changes. VarSets from different epochs
                      // cannot be meaningfully combined.

unsigned GetCurLVEpoch()
{
    return lvaCurEpoch;
}

// reverse map of tracked number to var number
unsigned  lvaTrackedToVarNumSize;
unsigned* lvaTrackedToVarNum;

#if DOUBLE_ALIGN
#ifdef DEBUG
// # of procs compiled with a double-aligned stack
static unsigned s_lvaDoubleAlignedProcsCount;
#endif
#endif
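// Example (illustrative sketch, not from the original header): tracked indices
// are dense, so a walk over all tracked locals typically remaps through
// lvaTrackedToVarNum:
//
//     for (unsigned trackedIndex = 0; trackedIndex < lvaTrackedCount; trackedIndex++)
//     {
//         unsigned   lclNum = lvaTrackedToVarNum[trackedIndex];
//         LclVarDsc* varDsc = lvaGetDesc(lclNum);
//         // ... use varDsc; valid only until the next tracking epoch change ...
//     }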
// Getters and setters for address-exposed and do-not-enregister local var properties.
bool lvaVarAddrExposed(unsigned varNum) const;
void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason));
void lvaSetVarLiveInOutOfHandler(unsigned varNum);
bool lvaVarDoNotEnregister(unsigned varNum);

void lvSetMinOptsDoNotEnreg();

bool lvaEnregEHVars;
bool lvaEnregMultiRegVars;

void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason));

unsigned lvaVarargsHandleArg;
#ifdef TARGET_X86
unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack
                                  // arguments
#endif // TARGET_X86

unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame
unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame
#if FEATURE_FIXED_OUT_ARGS
unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining.
#endif
unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods
                         // that tracks whether the lock has been taken

unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg.
                     // However, if there is a "ldarga 0" or "starg 0" in the IL,
                     // we will redirect all "ldarg(a) 0" and "starg 0" to this temp.

unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression
                                    // in case there are multiple BBJ_RETURN blocks in the inlinee
                                    // or if the inlinee has GC ref locals.

#if FEATURE_FIXED_OUT_ARGS
unsigned            lvaOutgoingArgSpaceVar;  // dummy TYP_LCLBLK var for fixed outgoing argument space
PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space
#endif // FEATURE_FIXED_OUT_ARGS

static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding)
{
    return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE);
}

// Variable representing the return address. The helper-based tailcall
// mechanism passes the address of the return address to a runtime helper
// where it is used to detect tail-call chains.
unsigned lvaRetAddrVar;

#if defined(DEBUG) && defined(TARGET_XARCH)

unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return.

#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)

unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call.

#endif // defined(DEBUG) && defined(TARGET_X86)

bool lvaGenericsContextInUse;

bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or
                                  // CORINFO_GENERICS_CTXT_FROM_THIS?
bool lvaReportParamTypeArg();     // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG?

//-------------------------------------------------------------------------
// All these frame offsets are inter-related and must be kept in sync

#if !defined(FEATURE_EH_FUNCLETS)
// This is used for the callable handlers
unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots
#endif // !FEATURE_EH_FUNCLETS

int lvaCachedGenericContextArgOffs;
int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as
                                        // THIS pointer

#ifdef JIT32_GCENCODER

unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc

#endif // JIT32_GCENCODER

unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper

// TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize
// which is based upon the number of spill temps predicted by reg predict
// All this is necessary because if we under-estimate the size of the spill
// temps we could fail when encoding instructions that reference stack offsets for ARM.
//
// Pre codegen max spill temp size.
static const unsigned MAX_SPILL_TEMP_SIZE = 24;

//-------------------------------------------------------------------------

unsigned lvaGetMaxSpillTempSize();
#ifdef TARGET_ARM
bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask);
#endif // TARGET_ARM
void lvaAssignFrameOffsets(FrameLayoutState curState);
void lvaFixVirtualFrameOffsets();
void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc);
void lvaUpdateArgsWithInitialReg();
void lvaAssignVirtualFrameOffsetsToArgs();
#ifdef UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset);
#else  // !UNIX_AMD64_ABI
int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs);
#endif // !UNIX_AMD64_ABI
void lvaAssignVirtualFrameOffsetsToLocals();
int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs);
#ifdef TARGET_AMD64
// Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even.
bool lvaIsCalleeSavedIntRegCountEven();
#endif
void lvaAlignFrame();
void lvaAssignFrameOffsetsToPromotedStructs();
int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign);

#ifdef DEBUG
void lvaDumpRegLocation(unsigned lclNum);
void lvaDumpFrameLocation(unsigned lclNum);
void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6);
void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame
                                                                // layout state defined by lvaDoneFrameLayout
#endif

// Limit frame size to 1GB. The maximum is 2GB in theory - make it intentionally smaller
// to avoid bugs from borderline cases.
#define MAX_FrameSize 0x3FFFFFFF

void lvaIncrementFrameSize(unsigned size);

unsigned lvaFrameSize(FrameLayoutState curState);

// Returns the caller-SP-relative offset for the given SP/FP-relative offset (FP-based when 'isFpBased').
int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const;

// Returns the caller-SP-relative offset for the local variable "varNum."
int lvaGetCallerSPRelativeOffset(unsigned varNum);

// Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc.
int lvaGetSPRelativeOffset(unsigned varNum);

int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased);
int lvaGetInitialSPRelativeOffset(unsigned varNum);

// True if this is an OSR compilation and this local is potentially
// located on the original method stack frame.
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
bool lvaIsImplicitByRefLocal(unsigned varNum)
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    LclVarDsc* varDsc = lvaGetDesc(varNum);
    if (varDsc->lvIsImplicitByRef)
    {
        assert(varDsc->lvIsParam);
        assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF));
        return true;
    }
#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64)
    return false;
}

// Returns true if this local var is a multireg struct
bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg);

// If the local is a TYP_STRUCT, get/set a class handle describing it
CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum);
void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true);
void lvaSetStructUsedAsVarArg(unsigned varNum);

// If the local is TYP_REF, set or update the associated class information.
void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);
void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false);
void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr);

#define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct

// Info about struct type fields.
struct lvaStructFieldInfo
{
    CORINFO_FIELD_HANDLE fldHnd;
    unsigned char        fldOffset;
    unsigned char        fldOrdinal;
    var_types            fldType;
    unsigned             fldSize;
    CORINFO_CLASS_HANDLE fldTypeHnd;

    lvaStructFieldInfo()
        : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr)
    {
    }
};

// Info about a struct type, instances of which may be candidates for promotion.
struct lvaStructPromotionInfo
{
    CORINFO_CLASS_HANDLE typeHnd;
    bool                 canPromote;
    bool                 containsHoles;
    bool                 customLayout;
    bool                 fieldsSorted;
    unsigned char        fieldCnt;
    lvaStructFieldInfo   fields[MAX_NumOfFieldsInPromotableStruct];

    lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr)
        : typeHnd(typeHnd)
        , canPromote(false)
        , containsHoles(false)
        , customLayout(false)
        , fieldsSorted(false)
        , fieldCnt(0)
    {
    }
};

struct lvaFieldOffsetCmp
{
    bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2);
};
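// Worked example (illustrative, not from the original header): for a struct such as
//
//     struct Pair { int lo; int hi; };
//
// a successfully analyzed lvaStructPromotionInfo would plausibly have
// canPromote == true, fieldCnt == 2 (within MAX_NumOfFieldsInPromotableStruct),
// and containsHoles/customLayout == false; fields[0] and fields[1] would record
// the handle, offsets (0 and 4), ordinals, and TYP_INT type of the two fields.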
// This class is responsible for checking validity and profitability of struct promotion.
// If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes
// necessary information for fgMorphStructField to use.
class StructPromotionHelper
{
public:
    StructPromotionHelper(Compiler* compiler);

    bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd);
    bool TryPromoteStructVar(unsigned lclNum);
    void Clear()
    {
        structPromotionInfo.typeHnd = NO_CLASS_HANDLE;
    }

#ifdef DEBUG
    void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType);
#endif // DEBUG

private:
    bool CanPromoteStructVar(unsigned lclNum);
    bool ShouldPromoteStructVar(unsigned lclNum);
    void PromoteStructVar(unsigned lclNum);
    void SortStructFields();

    bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo);

    lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal);
    bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo);

private:
    Compiler*              compiler;
    lvaStructPromotionInfo structPromotionInfo;

#ifdef DEBUG
    typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types>
        RetypedAsScalarFieldsMap;
    RetypedAsScalarFieldsMap retypedFieldsMap;
#endif // DEBUG
};

StructPromotionHelper* structPromotionHelper;

unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset);
lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetPromotionType(unsigned varNum);
lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc);
lvaPromotionType lvaGetParentPromotionType(unsigned varNum);
bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc);
bool lvaIsGCTracked(const LclVarDsc* varDsc);

#if defined(FEATURE_SIMD)
bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc)
{
    assert(varDsc->lvType == TYP_SIMD12);
    assert(varDsc->lvExactSize == 12);

#if defined(TARGET_64BIT)
    assert(compMacOsArm64Abi() || varDsc->lvSize() == 16);
#endif // defined(TARGET_64BIT)

    // We make local variable SIMD12 types 16 bytes instead of just 12.
    // lvSize() will return 16 bytes for SIMD12, even for fields.
    // However, we can't do that mapping if the var is a dependently promoted struct field.
    // Such a field must remain its exact size within its parent struct unless it is a single
    // field *and* it is the only field in a struct of 16 bytes.
    if (varDsc->lvSize() != 16)
    {
        return false;
    }
    if (lvaIsFieldOfDependentlyPromotedStruct(varDsc))
    {
        LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl);
        return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16);
    }
    return true;
}
#endif // defined(FEATURE_SIMD)

unsigned lvaGSSecurityCookie; // LclVar number
bool     lvaTempsHaveLargerOffsetThanVars();

// Returns "true" iff local variable "lclNum" is in SSA form.
bool lvaInSsa(unsigned lclNum)
{
    assert(lclNum < lvaCount);
    return lvaTable[lclNum].lvInSsa;
}

unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX

#if defined(FEATURE_EH_FUNCLETS)
unsigned lvaPSPSym; // variable representing the PSPSym
#endif

InlineInfo*     impInlineInfo; // Only present for inlinees
InlineStrategy* m_inlineStrategy;

InlineContext* compInlineContext; // Always present

// The Compiler* that is the root of the inlining tree of which "this" is a member.
Compiler* impInlineRoot();

#if defined(DEBUG) || defined(INLINE_DATA)
unsigned __int64 getInlineCycleCount()
{
    return m_compCycles;
}
#endif // defined(DEBUG) || defined(INLINE_DATA)

bool fgNoStructPromotion;      // Set to TRUE to turn off struct promotion for this method.
bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for this method's parameters.
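// Example (illustrative sketch, not from the original header): querying how a
// struct local was promoted, assuming 'lclNum' refers to a promoted struct local:
//
//     if (lvaGetPromotionType(lclNum) == PROMOTION_TYPE_DEPENDENT)
//     {
//         // the field locals live inside the parent's stack home; see
//         // lvaIsFieldOfDependentlyPromotedStruct and lvaGetFieldLocal above
//     }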
//=========================================================================
// PROTECTED
//=========================================================================

protected:
//---------------- Local variable ref-counting ----------------------------

void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute);

bool IsDominatedByExceptionalEntry(BasicBlock* block);
void SetVolatileHint(LclVarDsc* varDsc);

// Keeps the mapping from SSA #'s to VN's for the implicit memory variables.
SsaDefArray<SsaMemDef> lvMemoryPerSsaData;

public:
// Returns the address of the per-Ssa data for memory at the given ssaNum (which is required
// not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
// not an SSA variable).
SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum)
{
    return lvMemoryPerSsaData.GetSsaDef(ssaNum);
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Importer                                        XX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

private:
// For prefixFlags
enum
{
    PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
    PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no
                                           // "tail" IL prefix
    PREFIX_TAILCALL_STRESS = 0x00000100,   // call doesn't have the "tail" IL prefix but is treated as explicit
                                           // because of tail call stress
    PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS),
    PREFIX_VOLATILE    = 0x00001000,
    PREFIX_UNALIGNED   = 0x00010000,
    PREFIX_CONSTRAINED = 0x00100000,
    PREFIX_READONLY    = 0x01000000
};

static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix);
static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp);
static bool impOpcodeIsCallOpcode(OPCODE opcode);

public:
void impInit();
void impImport();

CORINFO_CLASS_HANDLE impGetRefAnyClass();
CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle();
CORINFO_CLASS_HANDLE impGetTypeHandleClass();
CORINFO_CLASS_HANDLE impGetStringClass();
CORINFO_CLASS_HANDLE impGetObjectClass();

// Returns underlying type of handles returned by ldtoken instruction
var_types GetRuntimeHandleUnderlyingType()
{
    // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes
    return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF;
}

void impDevirtualizeCall(GenTreeCall*            call,
                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
                         CORINFO_METHOD_HANDLE*  method,
                         unsigned*               methodFlags,
                         CORINFO_CONTEXT_HANDLE* contextHandle,
                         CORINFO_CONTEXT_HANDLE* exactContextHandle,
                         bool                    isLateDevirtualization,
                         bool                    isExplicitTailCall,
                         IL_OFFSET               ilOffset = BAD_IL_OFFSET);

//=========================================================================
// PROTECTED
//=========================================================================

protected:
//-------------------- Stack manipulation ---------------------------------

unsigned impStkSize; // Size of the full stack

#define SMALL_STACK_SIZE 16 // number of elements in impSmallStack

struct SavedStack // used to save/restore stack contents.
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset);

CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle);

bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv);

GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd);

GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv);

#ifdef DEBUG
var_types impImportJitTestLabelMark(int numArgs);
#endif // DEBUG

GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken);

GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp);

GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    CORINFO_ACCESS_FLAGS access,
                                    CORINFO_FIELD_INFO* pFieldInfo,
                                    var_types lclTyp);

static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr);

GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp);

GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp);

void impImportLeave(BasicBlock* block);
void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr);
GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom);

// Mirrors StringComparison.cs
enum StringComparison
{
    Ordinal           = 4,
    OrdinalIgnoreCase = 5
};
enum StringComparisonJoint
{
    Eq,  // (d1 == cns1) && (s2 == cns2)
    Xor, // (d1 ^ cns1) | (s2 ^ cns2)
};
GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags);
GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags);
GenTree* impExpandHalfConstEquals(GenTreeLclVar* data,
                                  GenTree* lengthFld,
                                  bool checkForNull,
                                  bool startsWith,
                                  WCHAR* cnsData,
                                  int len,
                                  int dataOffset,
                                  StringComparison cmpMode);
GenTree* impCreateCompareInd(GenTreeLclVar* obj,
                             var_types type,
                             ssize_t offset,
                             ssize_t value,
                             StringComparison ignoreCase,
                             StringComparisonJoint joint = Eq);
GenTree* impExpandHalfConstEqualsSWAR(
    GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode);
GenTree* impExpandHalfConstEqualsSIMD(
    GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode);
GenTreeStrCon* impGetStrConFromSpan(GenTree* span);

GenTree* impIntrinsic(GenTree* newobjThis,
                      CORINFO_CLASS_HANDLE clsHnd,
                      CORINFO_METHOD_HANDLE method,
                      CORINFO_SIG_INFO* sig,
                      unsigned methodFlags,
                      int memberRef,
                      bool readonlyCall,
                      bool tailCall,
                      CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                      CORINFO_THIS_TRANSFORM constraintCallThisTransform,
                      NamedIntrinsic* pIntrinsicName,
                      bool* isSpecialIntrinsic = nullptr);
GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method,
                          CORINFO_SIG_INFO* sig,
                          var_types callType,
                          NamedIntrinsic intrinsicName,
                          bool tailCall);
NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method);
GenTree* impUnsupportedNamedIntrinsic(unsigned helper,
                                      CORINFO_METHOD_HANDLE method,
                                      CORINFO_SIG_INFO* sig,
                                      bool mustExpand);

#ifdef FEATURE_HW_INTRINSICS
GenTree* impHWIntrinsic(NamedIntrinsic intrinsic,
                        CORINFO_CLASS_HANDLE clsHnd,
                        CORINFO_METHOD_HANDLE method,
                        CORINFO_SIG_INFO* sig,
                        bool mustExpand);
GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
                              CORINFO_CLASS_HANDLE clsHnd,
                              CORINFO_METHOD_HANDLE method,
                              CORINFO_SIG_INFO* sig,
                              GenTree* newobjThis);

protected:
bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa);

GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
                                     CORINFO_CLASS_HANDLE clsHnd,
                                     CORINFO_SIG_INFO* sig,
                                     var_types retType,
                                     CorInfoType simdBaseJitType,
unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. 
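// Example (illustrative sketch, not from the original header): between
// impBeginTreeList and impEndTreeList (declared below) the importer links
// statements for the current block; a walk over what has been accumulated so
// far, assuming Statement::GetNextStmt():
//
//     for (Statement* stmt = impStmtList; stmt != nullptr; stmt = stmt->GetNextStmt())
//     {
//         // ... inspect stmt->GetRootNode() ...
//     }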
public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, 
CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It lets us limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "blk" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "blk" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor.
void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // their incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that their incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int.
For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. JitExpandArray<BYTE> impSpillCliquePredMembers; JitExpandArray<BYTE> impSpillCliqueSuccMembers; enum SpillCliqueDir { SpillCliquePred, SpillCliqueSucc }; // Abstract class for receiving a callback while walking a spill clique class SpillCliqueWalker { public: virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0; }; // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique class SetSpillTempsBase : public SpillCliqueWalker { unsigned m_baseTmp; public: SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This class is used for implementing the impReimportSpillClique part on each block within the spill clique class ReimportSpillClique : public SpillCliqueWalker { Compiler* m_pComp; public: ReimportSpillClique(Compiler* pComp) : m_pComp(pComp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each // predecessor or successor within the spill clique void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback); // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the // incoming locals. This walks that list and resets the types of the GenTrees to match the types of // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique). void impRetypeEntryStateTemps(BasicBlock* blk); BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk); void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val); void impPushVar(GenTree* op, typeInfo tiRetVal); GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)); void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal); void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo); } void impLoadArg(unsigned ilArgNum, IL_OFFSET offset); void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); #ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif // A free list of linked list nodes used to represent to-do stacks of basic blocks.
struct BlockListNode { BasicBlock* m_blk; BlockListNode* m_next; BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next) { } void* operator new(size_t sz, Compiler* comp); }; BlockListNode* impBlockListNodeFreeList; void FreeBlockListNode(BlockListNode* node); bool impIsValueType(typeInfo* pTypeInfo); var_types mangleVarArgsType(var_types type); regNumber getCallArgIntRegister(regNumber floatReg); regNumber getCallArgFloatRegister(regNumber intReg); #if defined(DEBUG) static unsigned jitTotalMethodCompiled; #endif #ifdef DEBUG static LONG jitNestingLevel; #endif // DEBUG static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr); void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult); // STATIC inlining decision based on the IL code. void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; 
// For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. 
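        // Worked example (an illustrative sketch assuming 64-bit size_t): if fgBBNumMax is 200, the
        // assignments below produce fgCurBBEpochSize == 201 and
        // fgBBSetCountInSizeTUnits == roundUp(201, 64) / 64 == 4 size_t words per BlockSet.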
fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? 
#endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. // - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edge weights are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are in the global morphing phase // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the node's VN.
// // This information should be consulted when considering hoisting a node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires the value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.)
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing an overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value numbers for the implicit memory variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemTyp" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a low-order zero bit). Otherwise, low order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
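            // The low bit acts as the tag: a real class handle (returned just below for the struct
            // case) has it clear, while the "else" path encodes a primitive with it set. Illustrative
            // sketch: TYP_INT round-trips as CORINFO_CLASS_HANDLE((size_t(TYP_INT) << 1) | 0x1), and
            // DecodeElemType (below) recovers it by testing and shifting off that bit.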
return elemStructType; } else { assert(elemTyp != TYP_STRUCT); elemTyp = varTypeToSigned(elemTyp); return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1); } } // If "clsHnd" is the result of an "EncodeElemType" call for a primitive type, returns the // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is // the struct type of the element). static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd) { size_t clsHndVal = size_t(clsHnd); if (clsHndVal & 0x1) { return var_types(clsHndVal >> 1); } else { return TYP_STRUCT; } } // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types var_types getJitGCType(BYTE gcType); // Returns true if the provided type should be treated as a primitive type // for the unmanaged calling conventions. bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd); enum structPassingKind { SPK_Unknown, // Invalid value, never returned SPK_PrimitiveType, // The struct is passed/returned using a primitive type. SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that // require a primitive type temp that is larger than the struct size. // Currently used for structs of size 3, 5, 6, or 7 bytes. SPK_ByValue, // The struct is passed/returned by value (using the ABI rules) // for ARM64 and UNIX_X64 in multiple registers. (when all of the // parameter registers are used, then the stack will be used) // for X86 passed on the stack, for ARM32 passed in registers // or the stack or split between registers and the stack. SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers. SPK_ByReference }; // The struct is passed/returned by reference to a copy/buffer. // Get the "primitive" type that is used when we are given a struct of size 'structSize'. // For pointer sized structs the 'clsHnd' is used to determine if the struct contains a GC ref. // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg); // Get the type that is used to pass values of the given struct type. // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize); // Get the type that is used to return values of the given struct type. // If the size is unknown, pass 0 and it will be determined from 'clsHnd'. var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbPassStruct = nullptr, unsigned structSize = 0); #ifdef DEBUG // Print a representation of "vnp" or "vn" on standard output. // If "level" is non-zero, we also print out a partial expansion of the value. void vnpPrint(ValueNumPair vnp, unsigned level); void vnPrint(ValueNum vn, unsigned level); #endif bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2 // Dominator computation member functions // Not exposed outside Compiler protected: bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2 // Compute immediate dominators, the dominator tree and its pre/post-order traversal numbers.
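    // Illustrative example: in a diamond A -> {B, C} -> D, block A dominates all four blocks while
    // neither B nor C dominates D, so fgDominate(A, D) holds but fgDominate(B, D) does not. The
    // pre/post-order numbers built from the dominator tree are what let such queries be answered in O(1).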
void fgComputeDoms(); void fgCompDominatedByExceptionalEntryBlocks(); BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block. // Note: this is relatively slow compared to calling fgDominate(), // especially if dealing with a single block versus block check. void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.) void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks. void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'. bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets. void fgComputeReachability(); // Perform flow graph node reachability analysis. BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets. void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be // processed in topological order; this function takes care of that. void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count); BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph. // Returns this as a set. INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds. DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph // (performed by fgComputeDoms), this procedure builds the dominance tree represented as // adjacency lists. // In order to speed up the queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder // traversal of the dominance tree and the dominance query will become A dominates B iff preOrder(A) <= preOrder(B) // && postOrder(A) >= postOrder(B) making the computation O(1). void fgNumberDomTree(DomTreeNode* domTree); // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, // dominators, and possibly loops. void fgUpdateChangedFlowGraph(const bool computePreds = true, const bool computeDoms = true, const bool computeReturnBlocks = false, const bool computeLoops = false); public: // Compute the predecessors of the blocks in the control flow graph. void fgComputePreds(); // Remove all predecessor information. void fgRemovePreds(); // Compute the cheap flow graph predecessors lists. This is used in some early phases // before the full predecessors lists are computed. void fgComputeCheapPreds(); private: void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred); void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred); public: enum GCPollType { GCPOLL_NONE, GCPOLL_CALL, GCPOLL_INLINE }; // Initialize the per-block variable sets (used for liveness analysis). void fgInitBlockVarSets(); PhaseStatus fgInsertGCPolls(); BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block); // Requires that "block" is a block that returns from // a finally. Returns the number of successors (jump targets of // blocks in the covered "try" that did a "LEAVE".) unsigned fgNSuccsOfFinallyRet(BasicBlock* block); // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from // a finally. Returns its "i"th successor (jump targets of // blocks in the covered "try" that did a "LEAVE".) // Requires that "i" < fgNSuccsOfFinallyRet(block).
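    // A sketch of the intended enumeration pattern (local names here are illustrative only):
    //
    //   for (unsigned i = 0; i < fgNSuccsOfFinallyRet(finallyRetBlk); i++)
    //   {
    //       BasicBlock* succ = fgSuccOfFinallyRet(finallyRetBlk, i);
    //       // ... visit each continuation of the finally ...
    //   }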
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
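    // (For instance, as an illustrative note rather than an exhaustive contract: a transformation
    // that rewrites "switchBlk" into a non-switch block would call this so that a later
    // GetDescriptorForSwitch cannot observe a stale successor set.)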
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns whether you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; //
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for trees with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags are // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts;
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, that is a private field. 
    //
    unsigned fgGetPtrArgCntMax() const
    {
        return fgPtrArgCntMax;
    }

    //------------------------------------------------------------------------
    // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method
    // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations.
    //
    void fgSetPtrArgCntMax(unsigned argCntMax)
    {
        fgPtrArgCntMax = argCntMax;
    }

    bool compCanEncodePtrArgCntMax();

private:
    hashBv* fgOutgoingArgTemps;
    hashBv* fgCurrentlyInUseArgTemps;

    void fgSetRngChkTarget(GenTree* tree, bool delay = true);

    BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay);

#if REARRANGE_ADDS
    void fgMoveOpsLeft(GenTree* tree);
#endif

    bool fgIsCommaThrow(GenTree* tree, bool forFolding = false);

    bool fgIsThrow(GenTree* tree);

    bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2);
    bool fgIsBlockCold(BasicBlock* block);

    GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper);

    GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true);

    GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs);

    // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address,
    // it is useful to know whether the address will be immediately dereferenced, or whether the address value will
    // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done:
    // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we
    // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that
    // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently
    // small; hence the other fields of MorphAddrContext.
    enum MorphAddrContextKind
    {
        MACK_Ind,
        MACK_Addr,
    };
    struct MorphAddrContext
    {
        MorphAddrContextKind m_kind;
        bool                 m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between
                                                   // top-level indirection and here have been constants.
        size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true.
                              // In that case, is the sum of those constant offsets.

        MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0)
        {
        }
    };

    // A MACK_CopyBlock context is immutable, so we can just make one of these and share it.
    static MorphAddrContext s_CopyBlockMAC;

#ifdef FEATURE_SIMD
    GenTree* getSIMDStructFromField(GenTree*     tree,
                                    CorInfoType* simdBaseJitTypeOut,
                                    unsigned*    indexOut,
                                    unsigned*    simdSizeOut,
                                    bool         ignoreUsedInSIMDIntrinsic = false);
    GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
    GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
    bool     fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
    void     impMarkContiguousSIMDFieldAssignments(Statement* stmt);

    // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking previous simd field assignment
    // in function: Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offset. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has acdStkLvl value been already set? unsigned acdStkLvl; // stack level in stack slots. 
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
    void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext);

    // Return true if the tree looks profitable to hoist out of loop 'lnum'.
    bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum);

    // Performs the hoisting of 'tree' into the PreHeader for loop 'lnum'.
    void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt);

    // Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
    //   Constants and init values are always loop invariant.
    //   VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
    bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs);

    // If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
    // in the loop table.
    bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);

    // Records the set of "side effects" of all loops: fields (object instance and static)
    // written to, and SZ-array element type equivalence classes updated.
    void optComputeLoopSideEffects();

#ifdef DEBUG
    bool optAnyChildNotRemoved(unsigned loopNum);
#endif // DEBUG

    // Mark a loop as removed.
    void optMarkLoopRemoved(unsigned loopNum);

private:
    // Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
    // including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
    // static) written to, and SZ-array element type equivalence classes updated.
    void optComputeLoopNestSideEffects(unsigned lnum);

    // Given a loop number 'lnum', mark it and any nested loops as having 'memoryHavoc'.
    void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc);

    // Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
    // Returns false if we encounter a block that is not marked as being inside a loop.
    //
    bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk);

    // Hoist the expression "expr" out of loop "lnum".
    void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum);

public:
    void optOptimizeBools();

public:
    PhaseStatus optInvertLoops();    // Invert loops so they're entered at top and tested at bottom.
    PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method
    PhaseStatus optSetBlockWeights();
    PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table

    void optFindLoops();

    PhaseStatus optCloneLoops();
    void        optCloneLoop(unsigned loopInd, LoopCloneContext* context);
    void        optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight);
    PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info)
    void        optRemoveRedundantZeroInits();

protected:
    // This enumeration describes what is killed by a call.

    enum callInterf
    {
        CALLINT_NONE,       // no interference (most helpers)
        CALLINT_REF_INDIRS, // kills GC ref indirections (SETFIELD OBJ)
        CALLINT_SCL_INDIRS, // kills non GC ref indirections (SETFIELD non-OBJ)
        CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
        CALLINT_ALL,        // kills everything (normal method call)
    };

    enum class FieldKindForVN
    {
        SimpleStatic,
        WithBaseAddr
    };

public:
    // A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
    // bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
    // in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
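
        // Illustrative sketch only (assumed usage; this is not a member of LoopDsc): invariance
        // checks are expected to consult these side-effect sets before treating a field load as
        // hoistable, along the lines of:
        //
        //   FieldKindForVN fieldKind;
        //   bool fieldMayBeModified =
        //       (loop->lpFieldsModified != nullptr) && loop->lpFieldsModified->Lookup(fldHnd, &fieldKind);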
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
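
    // Illustrative sketch only (hypothetical helper, not part of the compiler): a phase that
    // caches a loop table index could validate it against the current loop epoch before use:
    //
    //   bool optIsCachedLoopEpochCurrent(unsigned cachedEpoch) const
    //   {
    //       return cachedEpoch == optCurLoopEpoch;
    //   }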
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
(0 == top level) unsigned optLoopDepth(unsigned lnum) { assert(lnum < optLoopCount); unsigned depth = 0; while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP) { ++depth; } return depth; } // Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score. struct OptInvertCountTreeInfoType { int sharedStaticHelperCount; int arrayLengthCount; }; static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data); bool optInvertWhileLoop(BasicBlock* block); private: static bool optIterSmallOverflow(int iterAtExit, var_types incrType); static bool optIterSmallUnderflow(int iterAtExit, var_types decrType); bool optComputeLoopRep(int constInit, int constLimit, int iterInc, genTreeOps iterOper, var_types iterType, genTreeOps testOper, bool unsignedTest, bool dupCond, unsigned* iterCount); static fgWalkPreFn optIsVarAssgCB; protected: bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var); bool optIsVarAssgLoop(unsigned lnum, unsigned var); int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE); bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit); protected: // The following is the upper limit on how many expressions we'll keep track // of for the CSE analysis. // static const unsigned MAX_CSE_CNT = EXPSET_SZ; static const int MIN_CSE_COST = 2; // BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask. // This BitVec uses one bit per CSE candidate BitVecTraits* cseMaskTraits; // one bit per CSE candidate // BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm. // Two bits are allocated per CSE candidate to compute CSE availability // plus an extra bit to handle the initial unvisited case. // (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.) // // The two bits per CSE candidate have the following meanings: // 11 - The CSE is available, and is also available when considering calls as killing availability. // 10 - The CSE is available, but is not available when considering calls as killing availability. // 00 - The CSE is not available // 01 - An illegal combination // BitVecTraits* cseLivenessTraits; //----------------------------------------------------------------------------------------------------------------- // getCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index. // Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate // CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from // GET_CSE_INDEX(). // static unsigned genCSEnum2bit(unsigned CSEnum) { assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT)); return CSEnum - 1; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE. // static unsigned getCSEAvailBit(unsigned CSEnum) { return genCSEnum2bit(CSEnum) * 2; } //----------------------------------------------------------------------------------------------------------------- // getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit // for a CSE considering calls as killing availability bit (see description above). 
    //
    static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
    {
        return getCSEAvailBit(CSEnum) + 1;
    }

    void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);

    EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites

    /* Generic list of nodes - used by the CSE logic */

    struct treeLst
    {
        treeLst* tlNext;
        GenTree* tlTree;
    };

    struct treeStmtLst
    {
        treeStmtLst* tslNext;
        GenTree*     tslTree;  // tree node
        Statement*   tslStmt;  // statement containing the tree
        BasicBlock*  tslBlock; // block containing the statement
    };

    // The following logic keeps track of expressions via a simple hash table.

    struct CSEdsc
    {
        CSEdsc*  csdNextInBucket;  // used by the hash table
        size_t   csdHashKey;       // the original hash key
        ssize_t  csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
        ValueNum csdConstDefVN;    // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
                                   // assignment
        unsigned csdIndex;         // 1..optCSECandidateCount
        bool     csdIsSharedConst; // true if this CSE is a shared const
        bool     csdLiveAcrossCall;

        unsigned short csdDefCount; // definition count
        unsigned short csdUseCount; // use count (excluding the implicit uses at defs)

        weight_t csdDefWtCnt; // weighted def count
        weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)

        GenTree*    csdTree;  // treenode containing the 1st occurrence
        Statement*  csdStmt;  // stmt containing the 1st occurrence
        BasicBlock* csdBlock; // block containing the 1st occurrence

        treeStmtLst* csdTreeList; // list of matching tree nodes: head
        treeStmtLst* csdTreeLast; // list of matching tree nodes: tail

        // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
        // and GT_IND nodes always have valid struct handle.
        //
        CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
        bool                 csdStructHndMismatch;

        ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
                                   // This will be set to NoVN if we decide to abandon this CSE

        ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.

        ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
                                   // number, this will reflect it; otherwise, NoVN.
                                   // not used for shared const CSE's
    };

    static const size_t s_optCSEhashSizeInitial;
    static const size_t s_optCSEhashGrowthFactor;
    static const size_t s_optCSEhashBucketSize;
    size_t              optCSEhashSize;                 // The current size of hashtable
    size_t              optCSEhashCount;                // Number of entries in hashtable
    size_t              optCSEhashMaxCountBeforeResize; // Number of entries before resize
    CSEdsc**            optCSEhash;
    CSEdsc**            optCSEtab;

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;

    NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
                                          // re-numbered with the bound to improve range check elimination

    // Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
    void optCseUpdateCheckedBoundMap(GenTree* compare);

    void optCSEstop();

    CSEdsc* optCSEfindDsc(unsigned index);
    bool    optUnmarkCSE(GenTree* tree);

    // user-defined callback data for the tree walk function optCSE_MaskHelper()
    struct optCSE_MaskData
    {
        EXPSET_TP CSE_defMask;
        EXPSET_TP CSE_useMask;
    };

    // Treewalk helper for optCSE_DefMask and optCSE_UseMask
    static fgWalkPreFn optCSE_MaskHelper;

    // This function walks all the nodes of a given tree
    // and returns the mask of CSE definitions and uses for the tree
    //
    void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);

    // Given a binary tree node, return true if it is safe to swap the order of evaluation for op1 and op2.
    bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);

    struct optCSEcostCmpEx
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };
    struct optCSEcostCmpSz
    {
        bool operator()(const CSEdsc* op1, const CSEdsc* op2);
    };

    void optCleanupCSEs();

#ifdef DEBUG
    void optEnsureClearCSEInfo();
#endif // DEBUG

    static bool Is_Shared_Const_CSE(size_t key)
    {
        return ((key & TARGET_SIGN_BIT) != 0);
    }

    // returns the encoded key
    static size_t Encode_Shared_Const_CSE_Value(size_t key)
    {
        return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
    }

    // returns the original key
    static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
    {
        assert(Is_Shared_Const_CSE(enckey));
        return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
    }

    /**************************************************************************
     *                   Value Number based CSEs
     *************************************************************************/

    // String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"

public:
    void optOptimizeValnumCSEs();

protected:
    void     optValnumCSE_Init();
    unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
    bool     optValnumCSE_Locate();
    void     optValnumCSE_InitDataFlow();
    void     optValnumCSE_DataFlow();
    void     optValnumCSE_Availablity();
    void     optValnumCSE_Heuristic();

    bool     optDoCSE;             // True when we have found a duplicate CSE tree
    bool     optValnumCSE_phase;   // True when we are executing the optOptimizeValnumCSEs() phase
    unsigned optCSECandidateCount; // Count of CSE's candidates
    unsigned optCSEstart;          // The first local variable number that is a CSE
    unsigned optCSEcount;          // The total count of CSE's introduced.
    weight_t optCSEweight;         // The weight of the current block when we are doing PerformCSE

    bool optIsCSEcandidate(GenTree* tree);

    // lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
    //
    bool lclNumIsTrueCSE(unsigned lclNum) const
    {
        return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
    }

    // lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
    //
    bool lclNumIsCSE(unsigned lclNum) const
    {
        return lvaGetDesc(lclNum)->lvIsCSE;
    }

#ifdef DEBUG
    bool optConfigDisableCSE();
    bool optConfigDisableCSE2();
#endif

    void optOptimizeCSEs();

    struct isVarAssgDsc
    {
        GenTree*     ivaSkip;
        ALLVARSET_TP ivaMaskVal; // Set of variables assigned to. This is a set of all vars, not tracked vars.
#ifdef DEBUG
        void* ivaSelf;
#endif
        unsigned    ivaVar;            // Variable we are interested in, or -1
        varRefKinds ivaMaskInd;        // What kind of indirect assignments are there?
        callInterf  ivaMaskCall;       // What kind of calls are there?
        bool        ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
    };

    static callInterf optCallInterf(GenTreeCall* call);

public:
    // VN based copy propagation.

    // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for.
    // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor,
    // for locals which will use "definitions from uses", it will not be, so we store it
    // in this class instead.
    class CopyPropSsaDef
    {
        LclSsaVarDsc* m_ssaDef;
#ifdef DEBUG
        GenTree* m_defNode;
#endif
    public:
        CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode)
            : m_ssaDef(ssaDef)
#ifdef DEBUG
            , m_defNode(defNode)
#endif
        {
        }

        LclSsaVarDsc* GetSsaDef() const
        {
            return m_ssaDef;
        }

#ifdef DEBUG
        GenTree* GetDefNode() const
        {
            return m_defNode;
        }
#endif
    };

    typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack;
    typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap;

    // Copy propagation functions.
    void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName);
    void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
    void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName);
    void optCopyPropPushDef(GenTreeOp*           asg,
                            GenTreeLclVarCommon* lclNode,
                            unsigned             lclNum,
                            LclNumToLiveDefsMap* curSsaName);
    unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode);
    int      optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2);
    void     optVnCopyProp();
    INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName));

    /**************************************************************************
     *               Early value propagation
     *************************************************************************/
    struct SSAName
    {
        unsigned m_lvNum;
        unsigned m_ssaNum;

        SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum)
        {
        }

        static unsigned GetHashCode(SSAName ssaNm)
        {
            return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum);
        }

        static bool Equals(SSAName ssaNm1, SSAName ssaNm2)
        {
            return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum);
        }
    };

#define OMF_HAS_NEWARRAY 0x00000001         // Method contains 'new' of an array
#define OMF_HAS_NEWOBJ 0x00000002           // Method contains 'new' of an object type.
#define OMF_HAS_ARRAYREF 0x00000004         // Method contains array element loads or stores.
#define OMF_HAS_NULLCHECK 0x00000008        // Method contains a null check.
#define OMF_HAS_FATPOINTER 0x00000010       // Method contains a call that needs fat pointer transformation.
#define OMF_HAS_OBJSTACKALLOC 0x00000020    // Method contains an object allocated on the stack.
#define OMF_HAS_GUARDEDDEVIRT 0x00000040    // Method contains a guarded devirtualization candidate
#define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary.
#define OMF_HAS_PATCHPOINT 0x00000100       // Method contains patchpoints
#define OMF_NEEDS_GCPOLLS 0x00000200        // Method needs GC polls
#define OMF_HAS_FROZEN_STRING 0x00000400    // Method has a frozen string (REF constant int), currently only on CoreRT.
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints
#define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000             // Method has a potential tail call in a non-BBJ_RETURN block

    bool doesMethodHaveFatPointer()
    {
        return (optMethodFlags & OMF_HAS_FATPOINTER) != 0;
    }

    void setMethodHasFatPointer()
    {
        optMethodFlags |= OMF_HAS_FATPOINTER;
    }

    void clearMethodHasFatPointer()
    {
        optMethodFlags &= ~OMF_HAS_FATPOINTER;
    }

    void addFatPointerCandidate(GenTreeCall* call);

    bool doesMethodHaveFrozenString() const
    {
        return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0;
    }

    void setMethodHasFrozenString()
    {
        optMethodFlags |= OMF_HAS_FROZEN_STRING;
    }

    bool doesMethodHaveGuardedDevirtualization() const
    {
        return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0;
    }

    void setMethodHasGuardedDevirtualization()
    {
        optMethodFlags |= OMF_HAS_GUARDEDDEVIRT;
    }

    void clearMethodHasGuardedDevirtualization()
    {
        optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT;
    }

    void considerGuardedDevirtualization(GenTreeCall*            call,
                                         IL_OFFSET               ilOffset,
                                         bool                    isInterface,
                                         CORINFO_METHOD_HANDLE   baseMethod,
                                         CORINFO_CLASS_HANDLE    baseClass,
                                         CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass)
                                             DEBUGARG(const char* objClassName));

    void addGuardedDevirtualizationCandidate(GenTreeCall*          call,
                                             CORINFO_METHOD_HANDLE methodHandle,
                                             CORINFO_CLASS_HANDLE  classHandle,
                                             unsigned              methodAttr,
                                             unsigned              classAttr,
                                             unsigned              likelihood);

    bool doesMethodHaveExpRuntimeLookup()
    {
        return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0;
    }

    void setMethodHasExpRuntimeLookup()
    {
        optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP;
    }

    void clearMethodHasExpRuntimeLookup()
    {
        optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP;
    }

    void addExpRuntimeLookupCandidate(GenTreeCall* call);

    bool doesMethodHavePatchpoints()
    {
        return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0;
    }

    void setMethodHasPatchpoint()
    {
        optMethodFlags |= OMF_HAS_PATCHPOINT;
    }

    bool doesMethodHavePartialCompilationPatchpoints()
    {
        return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0;
    }

    void setMethodHasPartialCompilationPatchpoint()
    {
        optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT;
    }

    unsigned optMethodFlags;

    bool doesMethodHaveNoReturnCalls()
    {
        return optNoReturnCallCount > 0;
    }

    void setMethodHasNoReturnCalls()
    {
        optNoReturnCallCount++;
    }

    unsigned optNoReturnCallCount;

    // This recursion bound controls how far back we walk when tracking an SSA value.
    // No throughput difference was found with a backward walk bound between 3 and 8.
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
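
        // For illustration only (shape inferred from the kinds above, not guaranteed): a loop
        // test such as "i < 100" typically yields an OAK_EQUAL/OAK_NOT_EQUAL assertion whose
        // op1.kind is O1K_CONSTANT_LOOP_BND and whose op1.vn names the relop's value number.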
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
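
    // Illustrative sketch only (assumed convention): AssertionIndex values are 1-based while
    // the ASSERT_TP dataflow bit vectors are 0-based, so a membership check looks roughly like:
    //
    //   bool isLive = BitVecOps::IsMember(apTraits, assertions, assertionIndex - 1);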
    fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree);
    GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test);
    GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree);
    GenTree* optExtractSideEffListFromConst(GenTree* tree);

    AssertionIndex GetAssertionCount()
    {
        return optAssertionCount;
    }
    ASSERT_TP* bbJtrueAssertionOut;
    typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap;
    ValueNumToAssertsMap* optValueNumToAsserts;

    // Assertion prop helpers.
    ASSERT_TP& GetAssertionDep(unsigned lclNum);
    AssertionDsc* optGetAssertion(AssertionIndex assertIndex);
    void optAssertionInit(bool isLocalProp);
    void optAssertionTraitsInit(AssertionIndex assertionCount);
    void optAssertionReset(AssertionIndex limit);
    void optAssertionRemove(AssertionIndex index);

    // Assertion prop data flow functions.
    void       optAssertionPropMain();
    Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt);
    bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags);
    ASSERT_TP* optInitAssertionDataflowFlags();
    ASSERT_TP* optComputeAssertionGen();

    // Assertion Gen functions.
    void optAssertionGen(GenTree* tree);
    AssertionIndex optAssertionGenCast(GenTreeCast* cast);
    AssertionIndex optAssertionGenPhiDefn(GenTree* tree);
    AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree);
    AssertionInfo optAssertionGenJtrue(GenTree* tree);
    AssertionIndex optCreateJtrueAssertions(GenTree*                   op1,
                                            GenTree*                   op2,
                                            Compiler::optAssertionKind assertionKind,
                                            bool                       helperCallArgs = false);
    AssertionIndex optFindComplementary(AssertionIndex assertionIndex);
    void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index);

    // Assertion creation functions.
    AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false);
    AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion);

    bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange);

    void optCreateComplementaryAssertion(AssertionIndex assertionIndex,
                                         GenTree*       op1,
                                         GenTree*       op2,
                                         bool           helperCallArgs = false);

    bool optAssertionVnInvolvesNan(AssertionDsc* assertion);
    AssertionIndex optAddAssertion(AssertionDsc* assertion);
    void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index);
#ifdef DEBUG
    void optPrintVnAssertionMapping();
#endif
    ASSERT_TP optGetVnMappedAssertions(ValueNum vn);

    // Used for respective assertion propagations.
    AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions);
    AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions);
    AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased));
    bool optAssertionIsNonNull(GenTree*         op,
                               ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex));

    AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2);
    AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1);
    AssertionIndex optLocalAssertionIsEqualOrNotEqual(
        optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions);

    // Assertion prop for lcl var functions.
    bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc);
    GenTree* optCopyAssertionProp(AssertionDsc*        curAssertion,
                                  GenTreeLclVarCommon* tree,
                                  Statement* stmt DEBUGARG(AssertionIndex index));
    GenTree* optConstantAssertionProp(AssertionDsc*        curAssertion,
                                      GenTreeLclVarCommon* tree,
                                      Statement* stmt DEBUGARG(AssertionIndex index));
    bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions);

    // Assertion propagation functions.
    GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block);
    GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt);
    GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt);
    GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt);
    GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
    GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt);
    GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt);
    GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
    GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
    GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
    GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
    GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt);
    GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt);
    GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call);

    // Implied assertion functions.
    void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions);
    void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions);
    void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result);
    void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result);

#ifdef DEBUG
    void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0);
    void optPrintAssertionIndex(AssertionIndex index);
    void optPrintAssertionIndices(ASSERT_TP assertions);
    void optDebugCheckAssertion(AssertionDsc* assertion);
    void optDebugCheckAssertions(AssertionIndex AssertionIndex);
#endif
    static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr);
    static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr);

    void optAddCopies();

    /**************************************************************************
     *                          Range checks
     *************************************************************************/

public:
    struct LoopCloneVisitorInfo
    {
        LoopCloneContext* context;
        unsigned          loopNum;
        Statement*        stmt;
        LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt)
            : context(context), loopNum(loopNum), stmt(nullptr)
        {
        }
    };

    bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum);
    bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
    bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum);
    bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context);
    static fgWalkPreFn optCanOptimizeByLoopCloningVisitor;
    fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info);
    bool optObtainLoopCloningOpts(LoopCloneContext* context);
    bool optIsLoopClonable(unsigned loopInd);

    bool optLoopCloningEnabled();

#ifdef DEBUG
    void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore);
#endif
    void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath));
    bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context);
    bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context);
    BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context,
                                              unsigned          loopNum,
                                              BasicBlock*       slowHead,
                                              BasicBlock*       insertAfter);

protected:
    ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk));

    bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB);

protected:
    bool optLoopsMarked;

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           RegAlloc                                        XX
XX                                                                           XX
XX  Does the register allocation and puts the remaining lclVars on the stack XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
    regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc);

    void raMarkStkVars();

#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
#if defined(TARGET_AMD64)
    static bool varTypeNeedsPartialCalleeSave(var_types type)
    {
        assert(type != TYP_STRUCT);
        return (type == TYP_SIMD32);
    }
#elif defined(TARGET_ARM64)
    static bool varTypeNeedsPartialCalleeSave(var_types type)
    {
        assert(type != TYP_STRUCT);
        // The ARM64 ABI FP callee-save registers only require the callee to save the lower 8 bytes.
        // For SIMD types longer than 8 bytes the caller is responsible for saving and restoring
        // the upper bytes.
        return ((type == TYP_SIMD16) || (type == TYP_SIMD12));
    }
#else // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

protected:
    // Some things are used by both LSRA and regpredict allocators.

    FrameType rpFrameType;
    bool      rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once

    bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));

private:
    Lowering*            m_pLowering;   // Lowering; needed to Lower IR that's added or modified after Lowering.
    LinearScanInterface* m_pLinearScan; // Linear Scan allocator

    /* raIsVarargsStackArg is called by raMaskStkVars and by lvaComputeRefCounts.
       It identifies the special case where a varargs function has a parameter
       passed on the stack, other than the special varargs handle. Such parameters
       require special treatment, because they cannot be tracked by the GC (their
       offsets in the stack are not known at compile time).
    */
    bool raIsVarargsStackArg(unsigned lclNum)
    {
#ifdef TARGET_X86
        LclVarDsc* varDsc = lvaGetDesc(lclNum);
        assert(varDsc->lvIsParam);
        return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));
#else  // TARGET_X86
        return false;
#endif // TARGET_X86
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           EEInterface                                     XX
XX                                                                           XX
XX   Get to the class and method info from the Execution Engine given        XX
XX   tokens for the class and method                                         XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
    // Get handles
    void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                       CORINFO_RESOLVED_TOKEN* pConstrainedToken,
                       CORINFO_CALLINFO_FLAGS  flags,
                       CORINFO_CALL_INFO*      pResult);

    void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                        CORINFO_ACCESS_FLAGS    flags,
                        CORINFO_FIELD_INFO*     pResult);

    // Get the flags
    bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
    bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn);
    bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd);

    var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr);

#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS)
    const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
    const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
    unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle);
    bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
    CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif

    var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
    var_types eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
    CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list);
    CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context);
    unsigned eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
    static unsigned eeGetArgSizeAlignment(var_types type, bool isFloatHfa);

    // VOM info, method sigs
    void eeGetSig(unsigned               sigTok,
                  CORINFO_MODULE_HANDLE  scope,
                  CORINFO_CONTEXT_HANDLE context,
                  CORINFO_SIG_INFO*      retSig);
    void eeGetCallSiteSig(unsigned               sigTok,
                          CORINFO_MODULE_HANDLE  scope,
                          CORINFO_CONTEXT_HANDLE context,
                          CORINFO_SIG_INFO*      retSig);
    void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr);

    // Method entry-points, instrs
    CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method);

    CORINFO_EE_INFO eeInfo;
    bool            eeInfoInitialized;

    CORINFO_EE_INFO* eeGetEEInfo();

    // Gets the offset of a SDArray's first element
    static unsigned eeGetArrayDataOffset();

    // Get the offset of a MDArray's first element
    static unsigned eeGetMDArrayDataOffset(unsigned rank);

    // Get the offset of a MDArray's dimension length for a given dimension.
    static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension);

    // Get the offset of a MDArray's lower bound for a given dimension.
    static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension);

    GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig);

    // Returns the page size for the target machine as reported by the EE.
    target_size_t eeGetPageSize()
    {
        return (target_size_t)eeGetEEInfo()->osPageSize;
    }

    //------------------------------------------------------------------------
    // VirtualStubParam: virtual stub dispatch extra parameter (slot address).
    //
    // It represents the ABI- and target-specific registers for the parameter.
    //
    class VirtualStubParamInfo
    {
    public:
        VirtualStubParamInfo(bool isCoreRTABI)
        {
#if defined(TARGET_X86)
            reg     = REG_EAX;
            regMask = RBM_EAX;
#elif defined(TARGET_AMD64)
            if (isCoreRTABI)
            {
                reg     = REG_R10;
                regMask = RBM_R10;
            }
            else
            {
                reg     = REG_R11;
                regMask = RBM_R11;
            }
#elif defined(TARGET_ARM)
            if (isCoreRTABI)
            {
                reg     = REG_R12;
                regMask = RBM_R12;
            }
            else
            {
                reg     = REG_R4;
                regMask = RBM_R4;
            }
#elif defined(TARGET_ARM64)
            reg     = REG_R11;
            regMask = RBM_R11;
#else
#error Unsupported or unset target architecture
#endif
        }

        regNumber GetReg() const
        {
            return reg;
        }
        _regMask_enum GetRegMask() const
        {
            return regMask;
        }

    private:
        regNumber     reg;
        _regMask_enum regMask;
    };

    VirtualStubParamInfo* virtualStubParamInfo;

    bool IsTargetAbi(CORINFO_RUNTIME_ABI abi)
    {
        return eeGetEEInfo()->targetAbi == abi;
    }

    bool generateCFIUnwindCodes()
    {
#if defined(FEATURE_CFI_SUPPORT)
        return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI);
#else
        return false;
#endif
    }

    // Debugging support - Line number info
    void eeGetStmtOffsets();

    unsigned                      eeBoundariesCount;
    ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE
    void eeSetLIcount(unsigned count);
    void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc);
    void eeSetLIdone();

#ifdef DEBUG
    static void eeDispILOffs(IL_OFFSET offs);
    static void eeDispSourceMappingOffs(uint32_t offs);
    static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line);
    void eeDispLineInfos();
#endif // DEBUG

    // Debugging support - Local var info
    void eeGetVars();

    unsigned eeVarsCount;

    struct VarResultInfo
    {
        UNATIVE_OFFSET             startOffset;
        UNATIVE_OFFSET             endOffset;
        DWORD                      varNumber;
        CodeGenInterface::siVarLoc loc;
    } * eeVars;
    void eeSetLVcount(unsigned count);
    void eeSetLVinfo(unsigned                          which,
                     UNATIVE_OFFSET                    startOffs,
                     UNATIVE_OFFSET                    length,
                     unsigned                          varNum,
                     const CodeGenInterface::siVarLoc& loc);
    void eeSetLVdone();

#ifdef DEBUG
    void eeDispVar(ICorDebugInfo::NativeVarInfo* var);
    void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars);
#endif // DEBUG

    // ICorJitInfo wrappers
    void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize);

    void eeAllocUnwindInfo(BYTE*          pHotCode,
                           BYTE*          pColdCode,
                           ULONG          startOffset,
                           ULONG          endOffset,
                           ULONG          unwindSize,
                           BYTE*          pUnwindBlock,
                           CorJitFuncKind funcKind);

    void eeSetEHcount(unsigned cEH);

    void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause);

    WORD eeGetRelocTypeHint(void* target);

    // ICorStaticInfo wrapper functions

    bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);

#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
    static void dumpSystemVClassificationType(SystemVClassificationType ct);
#endif // DEBUG

    void eeGetSystemVAmd64PassStructInRegisterDescriptor(
        /*IN*/ CORINFO_CLASS_HANDLE                                  structHnd,
        /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr);
#endif // UNIX_AMD64_ABI

    template <typename ParamType>
    bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param)
    {
        return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
    }

    bool eeRunWithErrorTrapImp(void (*function)(void*), void* param);

    template <typename ParamType>
    bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param)
    {
        return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param));
    }

    bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param);

    // Utility functions
    const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr);

#if defined(DEBUG)
    const WCHAR* eeGetCPString(size_t stringHandle);
#endif

    const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd);

    static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper);
    static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method);

    static bool IsSharedStaticHelper(GenTree* tree);
    static bool IsGcSafePoint(GenTreeCall* call);

    static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs);
    // returns true/false if 'field' is a Jit Data offset
    static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field);
    // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB)
    static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field);

/*****************************************************************************/

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           CodeGenerator                                   XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
    CodeGenInterface* codeGen;

    // Record the instr offset mapping to the generated code

    jitstd::list<IPmappingDsc> genIPmappings;

#ifdef DEBUG
    jitstd::list<PreciseIPMapping> genPreciseIPmappings;
#endif

    // Managed RetVal - A side hash table meant to record the mapping from a
    // GT_CALL node to its debug info. This info is used to emit sequence points
    // that can be used by debugger to determine the native offset at which the
    // managed RetVal will be available.
    //
    // In fact we can store debug info in a GT_CALL node. This was ruled out in
    // favor of a side table for two reasons: 1) We need debug info for only those
    // GT_CALL nodes (created during importation) that correspond to an IL call and
    // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used
    // structure and IL offset is needed only when generating debuggable code. Therefore
    // it is desirable to avoid memory size penalty in retail scenarios.
    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable;
    CallSiteDebugInfoTable* genCallSite2DebugInfoMap;

    unsigned    genReturnLocal; // Local number for the return value when applicable.
    BasicBlock* genReturnBB;    // jumped to when not optimizing for speed.

    // The following properties are part of CodeGenContext. Getters are provided here for
    // convenience and backward compatibility, but the properties can only be set by invoking
    // the setter on CodeGenContext directly.

    emitter* GetEmitter() const
    {
        return codeGen->GetEmitter();
    }

    bool isFramePointerUsed() const
    {
        return codeGen->isFramePointerUsed();
    }

    bool GetInterruptible()
    {
        return codeGen->GetInterruptible();
    }
    void SetInterruptible(bool value)
    {
        codeGen->SetInterruptible(value);
    }

#if DOUBLE_ALIGN
    const bool genDoubleAlign()
    {
        return codeGen->doDoubleAlign();
    }
    DWORD getCanDoubleAlign();
    bool shouldDoubleAlign(unsigned refCntStk,
                           unsigned refCntReg,
                           weight_t refCntWtdReg,
                           unsigned refCntStkParam,
                           weight_t refCntWtdStkDbl);
#endif // DOUBLE_ALIGN

    bool IsFullPtrRegMapRequired()
    {
        return codeGen->IsFullPtrRegMapRequired();
    }
    void SetFullPtrRegMapRequired(bool value)
    {
        codeGen->SetFullPtrRegMapRequired(value);
    }

// Things that MAY belong either in CodeGen or CodeGenContext

#if defined(FEATURE_EH_FUNCLETS)
    FuncInfoDsc*   compFuncInfos;
    unsigned short compCurrFuncIdx;
    unsigned short compFuncInfoCount;

    unsigned short compFuncCount()
    {
        assert(fgFuncletsCreated);
        return compFuncInfoCount;
    }

#else // !FEATURE_EH_FUNCLETS

    // This is a no-op when there are no funclets!
    void genUpdateCurrentFunclet(BasicBlock* block)
    {
        return;
    }

    FuncInfoDsc compFuncInfoRoot;

    static const unsigned compCurrFuncIdx = 0;

    unsigned short compFuncCount()
    {
        return 1;
    }

#endif // !FEATURE_EH_FUNCLETS

    FuncInfoDsc* funCurrentFunc();
    void funSetCurrentFunc(unsigned funcIdx);
    FuncInfoDsc* funGetFunc(unsigned funcIdx);
    unsigned int funGetFuncIdx(BasicBlock* block);

    // LIVENESS

    VARSET_TP compCurLife;     // current live variables
    GenTree*  compCurLifeTree; // node after which compCurLife has been computed

    // Compare the given "newLife" with last set of live variables and update
    // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness.
    template <bool ForCodeGen>
    void compChangeLife(VARSET_VALARG_TP newLife);

    // Update the GC's masks, register's masks and reports change on variable's homes given a set of
    // current live variables if changes have happened since "compCurLife".
    template <bool ForCodeGen>
    inline void compUpdateLife(VARSET_VALARG_TP newLife);

    // Gets a register mask that represent the kill set for a helper call since
    // not all JIT Helper calls follow the standard ABI on the target architecture.
    regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper);

#ifdef TARGET_ARM
    // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at
    // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the
    // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" --
    // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int
    // and a double, and we started at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to
    // the mask.
    void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM

    // If "tree" is an indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that
    // LCL_VAR node, else NULL.
    static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);

    // This map is indexed by GT_OBJ nodes that are address of promoted struct variables, which
    // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
    // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
    // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
    // vars of the promoted struct local that go dead at the given node (the set bits are the bits
    // for the tracked var indices of the field vars, as in a live var set).
    //
    // The map is allocated on demand so all map operations should use one of the following three
    // wrapper methods.

    NodeToVarsetPtrMap* m_promotedStructDeathVars;

    NodeToVarsetPtrMap* GetPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars == nullptr)
        {
            m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
        }
        return m_promotedStructDeathVars;
    }

    void ClearPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars != nullptr)
        {
            m_promotedStructDeathVars->RemoveAll();
        }
    }

    bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
    {
        *bits       = nullptr;
        bool result = false;

        if (m_promotedStructDeathVars != nullptr)
        {
            result = m_promotedStructDeathVars->Lookup(tree, bits);
        }

        return result;
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           UnwindInfo                                      XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#if !defined(__GNUC__)
#pragma region Unwind information
#endif

public:
    //
    // Infrastructure functions: start/stop/reserve/emit.
    //

    void unwindBegProlog();
    void unwindEndProlog();
    void unwindBegEpilog();
    void unwindEndEpilog();
    void unwindReserve();
    void unwindEmit(void* pHotCode, void* pColdCode);

    //
    // Specific unwind information functions: called by code generation to indicate a particular
    // prolog or epilog unwindable instruction has been generated.
    //

    void unwindPush(regNumber reg);
    void unwindAllocStack(unsigned size);
    void unwindSetFrameReg(regNumber reg, unsigned offset);
    void unwindSaveReg(regNumber reg, unsigned offset);

#if defined(TARGET_ARM)
    void unwindPushMaskInt(regMaskTP mask);
    void unwindPushMaskFloat(regMaskTP mask);
    void unwindPopMaskInt(regMaskTP mask);
    void unwindPopMaskFloat(regMaskTP mask);
    void unwindBranch16();                    // The epilog terminates with a 16-bit branch (e.g., "bx lr")
    void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
                                              // called via unwindPadding().
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
#endif // TARGET_ARM

#if defined(TARGET_ARM64)
    void unwindNop();
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
    void unwindSaveReg(regNumber reg, int offset);           // str reg, [sp, #offset]
    void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
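    // Paired-register (stp-based) variants of the single-register save codes above: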
    void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset);           // stp reg1, reg2, [sp, #offset]
    void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]!
    void unwindSaveNext();                                                        // unwind code: save_next
    void unwindReturn(regNumber reg);                                             // ret lr
#endif // defined(TARGET_ARM64)

    //
    // Private "helper" functions for the unwind implementation.
    //

private:
#if defined(FEATURE_EH_FUNCLETS)
    void unwindGetFuncLocations(FuncInfoDsc* func,
                                bool         getHotSectionData,
                                /* OUT */ emitLocation** ppStartLoc,
                                /* OUT */ emitLocation** ppEndLoc);
#endif // FEATURE_EH_FUNCLETS

    void unwindReserveFunc(FuncInfoDsc* func);
    void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode);

#if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS))

    void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode);
    void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode);

#endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS)

    UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func);

#if defined(TARGET_AMD64)

    void unwindBegPrologWindows();
    void unwindPushWindows(regNumber reg);
    void unwindAllocStackWindows(unsigned size);
    void unwindSetFrameRegWindows(regNumber reg, unsigned offset);
    void unwindSaveRegWindows(regNumber reg, unsigned offset);

#ifdef UNIX_AMD64_ABI
    void unwindSaveRegCFI(regNumber reg, unsigned offset);
#endif // UNIX_AMD64_ABI

#elif defined(TARGET_ARM)

    void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16);
    void unwindPushPopMaskFloat(regMaskTP mask);

#endif // TARGET_ARM

#if defined(FEATURE_CFI_SUPPORT)
    short mapRegNumToDwarfReg(regNumber reg);
    void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0);
    void unwindPushPopCFI(regNumber reg);
    void unwindBegPrologCFI();
    void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat);
    void unwindAllocStackCFI(unsigned size);
    void unwindSetFrameRegCFI(regNumber reg, unsigned offset);
    void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode);
#ifdef DEBUG
    void DumpCfiInfo(bool                  isHotCode,
                     UNATIVE_OFFSET        startOffset,
                     UNATIVE_OFFSET        endOffset,
                     DWORD                 cfiCodeBytes,
                     const CFI_CODE* const pCfiCode);
#endif

#endif // FEATURE_CFI_SUPPORT

#if !defined(__GNUC__)
#pragma endregion // Note: region is NOT under !defined(__GNUC__)
#endif

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                               SIMD                                        XX
XX                                                                           XX
XX   Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX
XX   that contains the distinguished, well-known SIMD type definitions).     XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

    bool IsBaselineSimdIsaSupported()
    {
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
        CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
        CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64

        return compOpportunisticallyDependsOn(minimumIsa);
#else
        return false;
#endif
    }

#if defined(DEBUG)
    bool IsBaselineSimdIsaSupportedDebugOnly()
    {
#ifdef FEATURE_SIMD
#if defined(TARGET_XARCH)
        CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2;
#elif defined(TARGET_ARM64)
        CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd;
#else
#error Unsupported platform
#endif // !TARGET_XARCH && !TARGET_ARM64

        return compIsaSupportedDebugOnly(minimumIsa);
#else
        return false;
#endif // FEATURE_SIMD
    }
#endif // DEBUG

    // Get highest available level for SIMD codegen
    SIMDLevel getSIMDSupportLevel()
    {
#if defined(TARGET_XARCH)
        if (compOpportunisticallyDependsOn(InstructionSet_AVX2))
        {
            return SIMD_AVX2_Supported;
        }

        if (compOpportunisticallyDependsOn(InstructionSet_SSE42))
        {
            return SIMD_SSE4_Supported;
        }

        // min bar is SSE2
        return SIMD_SSE2_Supported;
#else
        assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch");
        unreached();
        return SIMD_Not_Supported;
#endif
    }

    bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd)
    {
        return info.compCompHnd->isIntrinsicType(clsHnd);
    }

    const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName)
    {
        return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName);
    }

    CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index)
    {
        return info.compCompHnd->getTypeInstantiationArgument(cls, index);
    }

#ifdef FEATURE_SIMD

#ifndef TARGET_ARM64
    // Should we support SIMD intrinsics?
    bool featureSIMD;
#endif

    // Should we recognize SIMD types?
    // We always do this on ARM64 to support HVA types.
    bool supportSIMDTypes()
    {
#ifdef TARGET_ARM64
        return true;
#else
        return featureSIMD;
#endif
    }

    // Have we identified any SIMD types?
    // This is currently used by struct promotion to avoid getting type information for a struct
    // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in
    // the method.
    bool _usesSIMDTypes;
    bool usesSIMDTypes()
    {
        return _usesSIMDTypes;
    }
    void setUsesSIMDTypes(bool value)
    {
        _usesSIMDTypes = value;
    }

    // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics
    // that require indexed access to the individual fields of the vector, which is not well supported
    // by the hardware. It is allocated when/if such situations are encountered during Lowering.
    unsigned lvaSIMDInitTempVarNum;

    struct SIMDHandlesCache
    {
        // SIMD Types
        CORINFO_CLASS_HANDLE SIMDFloatHandle;
        CORINFO_CLASS_HANDLE SIMDDoubleHandle;
        CORINFO_CLASS_HANDLE SIMDIntHandle;
        CORINFO_CLASS_HANDLE SIMDUShortHandle;
        CORINFO_CLASS_HANDLE SIMDUByteHandle;
        CORINFO_CLASS_HANDLE SIMDShortHandle;
        CORINFO_CLASS_HANDLE SIMDByteHandle;
        CORINFO_CLASS_HANDLE SIMDLongHandle;
        CORINFO_CLASS_HANDLE SIMDUIntHandle;
        CORINFO_CLASS_HANDLE SIMDULongHandle;
        CORINFO_CLASS_HANDLE SIMDNIntHandle;
        CORINFO_CLASS_HANDLE SIMDNUIntHandle;

        CORINFO_CLASS_HANDLE SIMDVector2Handle;
        CORINFO_CLASS_HANDLE SIMDVector3Handle;
        CORINFO_CLASS_HANDLE SIMDVector4Handle;
        CORINFO_CLASS_HANDLE SIMDVectorHandle;

#ifdef FEATURE_HW_INTRINSICS
#if defined(TARGET_ARM64)
        CORINFO_CLASS_HANDLE Vector64FloatHandle;
        CORINFO_CLASS_HANDLE Vector64DoubleHandle;
        CORINFO_CLASS_HANDLE Vector64IntHandle;
        CORINFO_CLASS_HANDLE Vector64UShortHandle;
        CORINFO_CLASS_HANDLE Vector64UByteHandle;
        CORINFO_CLASS_HANDLE Vector64ShortHandle;
        CORINFO_CLASS_HANDLE Vector64ByteHandle;
        CORINFO_CLASS_HANDLE Vector64LongHandle;
        CORINFO_CLASS_HANDLE Vector64UIntHandle;
        CORINFO_CLASS_HANDLE Vector64ULongHandle;
        CORINFO_CLASS_HANDLE Vector64NIntHandle;
        CORINFO_CLASS_HANDLE Vector64NUIntHandle;
#endif // defined(TARGET_ARM64)
        CORINFO_CLASS_HANDLE Vector128FloatHandle;
        CORINFO_CLASS_HANDLE Vector128DoubleHandle;
        CORINFO_CLASS_HANDLE Vector128IntHandle;
        CORINFO_CLASS_HANDLE Vector128UShortHandle;
        CORINFO_CLASS_HANDLE Vector128UByteHandle;
        CORINFO_CLASS_HANDLE Vector128ShortHandle;
        CORINFO_CLASS_HANDLE Vector128ByteHandle;
        CORINFO_CLASS_HANDLE Vector128LongHandle;
        CORINFO_CLASS_HANDLE Vector128UIntHandle;
        CORINFO_CLASS_HANDLE Vector128ULongHandle;
        CORINFO_CLASS_HANDLE Vector128NIntHandle;
        CORINFO_CLASS_HANDLE Vector128NUIntHandle;
#if defined(TARGET_XARCH)
        CORINFO_CLASS_HANDLE Vector256FloatHandle;
        CORINFO_CLASS_HANDLE Vector256DoubleHandle;
        CORINFO_CLASS_HANDLE Vector256IntHandle;
        CORINFO_CLASS_HANDLE Vector256UShortHandle;
        CORINFO_CLASS_HANDLE Vector256UByteHandle;
        CORINFO_CLASS_HANDLE Vector256ShortHandle;
        CORINFO_CLASS_HANDLE Vector256ByteHandle;
        CORINFO_CLASS_HANDLE Vector256LongHandle;
        CORINFO_CLASS_HANDLE Vector256UIntHandle;
        CORINFO_CLASS_HANDLE Vector256ULongHandle;
        CORINFO_CLASS_HANDLE Vector256NIntHandle;
        CORINFO_CLASS_HANDLE Vector256NUIntHandle;
#endif // defined(TARGET_XARCH)
#endif // FEATURE_HW_INTRINSICS

        SIMDHandlesCache()
        {
            memset(this, 0, sizeof(*this));
        }
    };

    SIMDHandlesCache* m_simdHandleCache;

    // Get an appropriate "zero" for the given type and class handle.
    GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle);

    // Get the handle for a SIMD type.
    CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType)
    {
        if (m_simdHandleCache == nullptr)
        {
            // This may happen if the JIT generates SIMD node on its own, without importing them.
            // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache.
            return NO_CLASS_HANDLE;
        }

        if (simdBaseJitType == CORINFO_TYPE_FLOAT)
        {
            switch (simdType)
            {
                case TYP_SIMD8:
                    return m_simdHandleCache->SIMDVector2Handle;
                case TYP_SIMD12:
                    return m_simdHandleCache->SIMDVector3Handle;
                case TYP_SIMD16:
                    if ((getSIMDVectorType() == TYP_SIMD32) ||
                        (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
                    {
                        return m_simdHandleCache->SIMDVector4Handle;
                    }
                    break;
                case TYP_SIMD32:
                    break;
                default:
                    unreached();
            }
        }

        assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());

        switch (simdBaseJitType)
        {
            case CORINFO_TYPE_FLOAT:
                return m_simdHandleCache->SIMDFloatHandle;
            case CORINFO_TYPE_DOUBLE:
                return m_simdHandleCache->SIMDDoubleHandle;
            case CORINFO_TYPE_INT:
                return m_simdHandleCache->SIMDIntHandle;
            case CORINFO_TYPE_USHORT:
                return m_simdHandleCache->SIMDUShortHandle;
            case CORINFO_TYPE_UBYTE:
                return m_simdHandleCache->SIMDUByteHandle;
            case CORINFO_TYPE_SHORT:
                return m_simdHandleCache->SIMDShortHandle;
            case CORINFO_TYPE_BYTE:
                return m_simdHandleCache->SIMDByteHandle;
            case CORINFO_TYPE_LONG:
                return m_simdHandleCache->SIMDLongHandle;
            case CORINFO_TYPE_UINT:
                return m_simdHandleCache->SIMDUIntHandle;
            case CORINFO_TYPE_ULONG:
                return m_simdHandleCache->SIMDULongHandle;
            case CORINFO_TYPE_NATIVEINT:
                return m_simdHandleCache->SIMDNIntHandle;
            case CORINFO_TYPE_NATIVEUINT:
                return m_simdHandleCache->SIMDNUIntHandle;
            default:
                assert(!"Didn't find a class handle for simdType");
        }

        return NO_CLASS_HANDLE;
    }

    // Returns true if this is a SIMD type that should be considered an opaque
    // vector type (i.e. do not analyze or promote its fields).
    // Note that all but the fixed vector types are opaque, even though they may
    // actually be declared as having fields.
    bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
    {
        return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector3Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector4Handle));
    }

    // Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but
    // the type of an arg node is TYP_BYREF while a local node is TYP_SIMD or TYP_STRUCT.
    bool isSIMDTypeLocal(GenTree* tree)
    {
        return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
    }

    // Returns true if the lclVar is an opaque SIMD type.
    bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
    {
        if (!varDsc->lvSIMDType)
        {
            return false;
        }

        return isOpaqueSIMDType(varDsc->GetStructHnd());
    }

    static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
    {
        return (intrinsicId == SIMDIntrinsicEqual);
    }

    // Returns base JIT type of a TYP_SIMD local.
    // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
    CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree)
    {
        if (isSIMDTypeLocal(tree))
        {
            return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType();
        }

        return CORINFO_TYPE_UNDEF;
    }

    bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
    {
        if (isIntrinsicType(clsHnd))
        {
            const char* namespaceName = nullptr;
            (void)getClassNameFromMetadata(clsHnd, &namespaceName);

            return strcmp(namespaceName, "System.Numerics") == 0;
        }
        return false;
    }

    bool isSIMDClass(typeInfo* pTypeInfo)
    {
        return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass());
    }

    bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
    {
#ifdef FEATURE_HW_INTRINSICS
        if (isIntrinsicType(clsHnd))
        {
            const char* namespaceName = nullptr;
            (void)getClassNameFromMetadata(clsHnd, &namespaceName);

            return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0;
        }
#endif // FEATURE_HW_INTRINSICS
        return false;
    }

    bool isHWSIMDClass(typeInfo* pTypeInfo)
    {
#ifdef FEATURE_HW_INTRINSICS
        return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass());
#else
        return false;
#endif
    }

    bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd)
    {
        return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd);
    }

    bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo)
    {
        return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo);
    }

    // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF
    // if it is not a SIMD type or is an unsupported base JIT type.
    CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr);

    CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd)
    {
        return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr);
    }

    // Get SIMD Intrinsic info given the method handle.
    // Also sets typeHnd, argCount, baseType and sizeBytes out params.
    const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd,
                                                  CORINFO_METHOD_HANDLE methodHnd,
                                                  CORINFO_SIG_INFO*     sig,
                                                  bool                  isNewObj,
                                                  unsigned*             argCount,
                                                  CorInfoType*          simdBaseJitType,
                                                  unsigned*             sizeBytes);

    // Pops and returns GenTree node from importer's type stack.
    // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes.
    GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr);

    // Transforms operands and returns the SIMD intrinsic to be applied on
    // transformed operands to obtain given relop result.
    SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID      relOpIntrinsicId,
                                 CORINFO_CLASS_HANDLE typeHnd,
                                 unsigned             simdVectorSize,
                                 CorInfoType*         inOutBaseJitType,
                                 GenTree**            op1,
                                 GenTree**            op2);

#if defined(TARGET_XARCH)

    // Transforms operands and returns the SIMD intrinsic to be applied on
    // transformed operands to obtain == comparison result.
    SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd,
                                          unsigned             simdVectorSize,
                                          GenTree**            op1,
                                          GenTree**            op2);

#endif // defined(TARGET_XARCH)

    void setLclRelatedToSIMDIntrinsic(GenTree* tree);
    bool areFieldsContiguous(GenTree* op1, GenTree* op2);
    bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second);
    bool areArrayElementsContiguous(GenTree* op1, GenTree* op2);
    bool areArgumentsContiguous(GenTree* op1, GenTree* op2);
    GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize);

    // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT.
    GenTree* impSIMDIntrinsic(OPCODE                opcode,
                              GenTree*              newobjThis,
                              CORINFO_CLASS_HANDLE  clsHnd,
                              CORINFO_METHOD_HANDLE method,
                              CORINFO_SIG_INFO*     sig,
                              unsigned              methodFlags,
                              int                   memberRef);

    GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);

    // Whether SIMD vector occupies part of SIMD register.
    // SSE2: vector2f/3f are considered sub register SIMD types.
    // AVX: vector2f, 3f and 4f are all considered sub register SIMD types.
    bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
    {
        unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
        // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
        // with the AOT compiler, so that it cannot change from AOT compilation time to runtime.
        // This api does not require such fixing as it merely pertains to the size of the simd type
        // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
        // does not preclude the code from being used on a machine with a larger vector length.)
        if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
        {
            vectorRegisterByteLength = 16;
        }
        else
        {
            vectorRegisterByteLength = 32;
        }
#else
        vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
        return (simdNode->GetSimdSize() < vectorRegisterByteLength);
    }

    // Get the type for the hardware SIMD vector.
    // This is the maximum SIMD type supported for this target.
    var_types getSIMDVectorType()
    {
#if defined(TARGET_XARCH)
        if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
        {
            return TYP_SIMD32;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return TYP_SIMD16;
        }
#elif defined(TARGET_ARM64)
        return TYP_SIMD16;
#else
        assert(!"getSIMDVectorType() unimplemented on target arch");
        unreached();
#endif
    }

    // Get the size of the SIMD type in bytes
    int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
    {
        unsigned sizeBytes = 0;
        (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
        return sizeBytes;
    }

    // Get the number of elements of baseType of SIMD vector given by its size and baseType
    static int getSIMDVectorLength(unsigned simdSize, var_types baseType);

    // Get the number of elements of baseType of SIMD vector given by its type handle
    int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);

    // Get preferred alignment of SIMD type.
    int getSIMDTypeAlignment(var_types simdType);

    // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation.
    // Note - cannot be used for System.Runtime.Intrinsic
    unsigned getSIMDVectorRegisterByteLength()
    {
#if defined(TARGET_XARCH)
        if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
        {
            return YMM_REGSIZE_BYTES;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return XMM_REGSIZE_BYTES;
        }
#elif defined(TARGET_ARM64)
        return FP_REGSIZE_BYTES;
#else
        assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
        unreached();
#endif
    }

    // The minimum and maximum possible number of bytes in a SIMD vector.
    // maxSIMDStructBytes
    // The maximum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic
    // SSE:  16-byte Vector<T> and Vector128<T>
    // AVX:  32-byte Vector256<T> (Vector<T> is 16-byte)
    // AVX2: 32-byte Vector<T> and Vector256<T>
    unsigned int maxSIMDStructBytes()
    {
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (compOpportunisticallyDependsOn(InstructionSet_AVX))
        {
            return YMM_REGSIZE_BYTES;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return XMM_REGSIZE_BYTES;
        }
#else
        return getSIMDVectorRegisterByteLength();
#endif
    }

    unsigned int minSIMDStructBytes()
    {
        return emitTypeSize(TYP_SIMD8);
    }

public:
    // Returns the codegen type for a given SIMD size.
    static var_types getSIMDTypeForSize(unsigned size)
    {
        var_types simdType = TYP_UNDEF;
        if (size == 8)
        {
            simdType = TYP_SIMD8;
        }
        else if (size == 12)
        {
            simdType = TYP_SIMD12;
        }
        else if (size == 16)
        {
            simdType = TYP_SIMD16;
        }
        else if (size == 32)
        {
            simdType = TYP_SIMD32;
        }
        else
        {
            noway_assert(!"Unexpected size for SIMD type");
        }
        return simdType;
    }

private:
    unsigned getSIMDInitTempVarNum(var_types simdType);

#else // !FEATURE_SIMD

    bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
    {
        return false;
    }

#endif // FEATURE_SIMD

public:
    //------------------------------------------------------------------------
    // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered.
    //
    // Notes: It is not guaranteed that the struct of this size or smaller WILL be a
    //        candidate for enregistration.

    unsigned largestEnregisterableStructSize()
    {
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (opts.IsReadyToRun())
        {
            // Return a constant instead of calling maxSIMDStructBytes: the checks maxSIMDStructBytes
            // performs are affected by the current level of instruction set support, and would
            // otherwise cause the highest level of instruction set support to be reported to crossgen2.
            // This api is only ever used as an optimization or assert, so no reporting should
            // ever happen.
            return YMM_REGSIZE_BYTES;
        }
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        unsigned vectorRegSize = maxSIMDStructBytes();
        assert(vectorRegSize >= TARGET_POINTER_SIZE);
        return vectorRegSize;
#else  // !FEATURE_SIMD
        return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
    }

    // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many
    // structs will fit the criteria.
    bool structSizeMightRepresentSIMDType(size_t structSize)
    {
#ifdef FEATURE_SIMD
        // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT
        // about the size of a struct under the assumption that the struct size needs to be recorded.
        // By using largestEnregisterableStructSize here, the detail of whether Vector256<T> is
        // enregistered or not will not be messaged to the R2R compiler.
        return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
        return false;
#endif // FEATURE_SIMD
    }

#ifdef FEATURE_SIMD
    static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // !FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS

private:
    // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
    // is defined for both FEATURE_SIMD and !FEATURE_SIMD appropriately.
    // The use of these routines also avoids the need for #ifdef FEATURE_SIMD specific code.

    // Is this var of SIMD struct type?
    bool lclVarIsSIMDType(unsigned varNum)
    {
        return lvaGetDesc(varNum)->lvIsSIMDType();
    }

    // Is this Local node a SIMD local?
    bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
    {
        return lclVarIsSIMDType(lclVarTree->GetLclNum());
    }

    // Returns true if the TYP_SIMD locals on stack are aligned at their
    // preferred byte boundary specified by getSIMDTypeAlignment().
    //
    // As per the Intel manual, the preferred alignment for AVX vectors is
    // 32 bytes. It is not clear whether additional stack space used in
    // aligning the stack is worth the benefit, so for now we will use 16-byte
    // alignment for AVX 256-bit vectors with unaligned load/stores to/from
    // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
    // existing support for double (8-byte) alignment to 16 or 32 byte
    // alignment for frames with local SIMD vars, if that is determined to be
    // profitable.
    //
    // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
    // prolog has run). This means that in RBP-based frames RBP will be 16-byte
    // aligned. For RSP-based frames these are only sometimes aligned, depending
    // on the frame size.
    //
    bool isSIMDTypeLocalAligned(unsigned varNum)
    {
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
        if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
        {
            // TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
            int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
            if (alignment <= STACK_ALIGN)
            {
                bool rbpBased;
                int  off = lvaFrameAddress(varNum, &rbpBased);
                // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
                // first instruction of a function. If our frame is RBP based
                // then RBP will always be 16 bytes aligned, so we can simply
                // check the offset.
                if (rbpBased)
                {
                    return (off % alignment) == 0;
                }

                // For RSP-based frame the alignment of RSP depends on our
                // locals. rsp+8 is aligned on entry and we just subtract frame
                // size so it is not hard to compute. Note that the compiler
                // tries hard to make sure the frame size means RSP will be
                // 16-byte aligned, but for leaf functions without locals (i.e.
                // frameSize = 0) it will not be.
                int frameSize = codeGen->genTotalFrameSize();
                return ((8 - frameSize + off) % alignment) == 0;
            }
        }
#endif // FEATURE_SIMD
        return false;
    }

#ifdef DEBUG
    // Answer the question: Is a particular ISA supported?
    // Use this api when asking the question so that future
    // ISA questions can be asked correctly or when asserting
    // support/nonsupport for an instruction set
    bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
        return false;
#endif
    }
#endif // DEBUG

    bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will exactly match the target machine
    // on which the function is executed (except for CoreLib, where there are special rules)
    bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        uint64_t isaBit = (1ULL << isa);
        if ((opts.compSupportsISAReported & isaBit) == 0)
        {
            if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
                ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
            ((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
        }
        return (opts.compSupportsISAExactly & isaBit) != 0;
#else
        return false;
#endif
    }

    // Ensure that code will not execute if an instruction set is usable. Call only
    // if the instruction set has previously been reported as unusable, but
    // that status has not yet been recorded to the AOT compiler.
    void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
    {
        // Use compExactlyDependsOn to capture and record the use of the ISA.
        bool isaUsable = compExactlyDependsOn(isa);
        // Assert that the ISA is unusable; if it were usable, this function should never be called.
        assert(!isaUsable);
    }

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will match the target machine if the result is true.
    // If the result is false, then the target machine may have support for the instruction.
    bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
    {
        if ((opts.compSupportsISA & (1ULL << isa)) != 0)
        {
            return compExactlyDependsOn(isa);
        }
        else
        {
            return false;
        }
    }

    // Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
    bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
    {
        // Report intent to use the ISA to the EE
        compExactlyDependsOn(isa);
        return ((opts.compSupportsISA & (1ULL << isa)) != 0);
    }

    bool canUseVexEncoding() const
    {
#ifdef TARGET_XARCH
        return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
        return false;
#endif
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Compiler                                        XX
XX                                                                           XX
XX   Generic info about the compilation and the method being compiled.       XX
XX   It is responsible for driving the other phases.                         XX
XX   It is also responsible for all the memory management.                   XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
    Compiler*     InlineeCompiler;  // The Compiler instance for the inlinee
    InlineResult* compInlineResult; // The result of importing the inlinee method.

    bool compDoAggressiveInlining;  // If true, mark every method as CORINFO_FLG_FORCEINLINE
    bool compJmpOpUsed;             // Does the method do a JMP
    bool compLongUsed;              // Does the method use TYP_LONG
    bool compFloatingPointUsed;     // Does the method use TYP_FLOAT or TYP_DOUBLE
    bool compTailCallUsed;          // Does the method do a tailcall
    bool compTailPrefixSeen;        // Does the method IL have tail. prefix
    bool compLocallocSeen;          // Does the method IL have localloc opcode
    bool compLocallocUsed;          // Does the method use localloc.
    bool compLocallocOptimized;     // Does the method have an optimized localloc
    bool compQmarkUsed;             // Does the method use GT_QMARK/GT_COLON
    bool compQmarkRationalized;     // Is it allowed to use a GT_QMARK/GT_COLON node.
    bool compHasBackwardJump;       // Does the method (or some inlinee) have a lexically backwards jump?
    bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler?
    bool compSwitchedToOptimized;      // Codegen initially was Tier0 but jit switched to FullOpts
    bool compSwitchedToMinOpts;        // Codegen initially was Tier1/FullOpts but jit switched to MinOpts
    bool compSuppressedZeroInit;       // There are vars with lvSuppressedZeroInit set

    // NOTE: These values are only reliable after
    //       the importing is completely finished.

#ifdef DEBUG
    // State information - which phases have completed?
    // These are kept together for easy discoverability

    bool    bRangeAllowStress;
    bool    compCodeGenDone;
    int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
    bool    fgNormalizeEHDone;              // Has the flowgraph EH normalization phase been done?
    size_t  compSizeEstimate;               // The estimated size of the method as per `gtSetEvalOrder`.
    size_t  compCycleEstimate;              // The estimated cycle count of the method as per `gtSetEvalOrder`
#endif // DEBUG

    bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
    bool fgLocalVarLivenessChanged;
    bool compLSRADone;
    bool compRationalIRForm;

    bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method.

    bool compGeneratingProlog;
    bool compGeneratingEpilog;
    bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
                                    // Insert cookie on frame and code to check the cookie, like VC++ -GS.
    bool compGSReorderStackLayout;  // There is an unsafe buffer on the stack, reorder locals and make local
    // copies of susceptible parameters to avoid buffer overrun attacks through locals/params

    bool getNeedsGSSecurityCookie() const
    {
        return compNeedsGSSecurityCookie;
    }
    void setNeedsGSSecurityCookie()
    {
        compNeedsGSSecurityCookie = true;
    }

    FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
                                         // frame layout calculations, this is the level we are currently
                                         // computing.

    //---------------------------- JITing options -----------------------------

    enum codeOptimize
    {
        BLENDED_CODE,
        SMALL_CODE,
        FAST_CODE,

        COUNT_OPT_CODE
    };

    struct Options
    {
        JitFlags* jitFlags; // all flags passed from the EE

        // The instruction sets that the compiler is allowed to emit.
        uint64_t compSupportsISA;
        // The instruction sets that were reported to the VM as being used by the current method. Subset of
        // compSupportsISA.
        uint64_t compSupportsISAReported;
        // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations.
        // Subset of compSupportsISA.
        // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only
        // used via explicit hardware intrinsics.
        uint64_t compSupportsISAExactly;

        void setSupportedISAs(CORINFO_InstructionSetFlags isas)
        {
            compSupportsISA = isas.GetFlagsRaw();
        }

        unsigned compFlags; // method attributes
        unsigned instrCount;
        unsigned lvRefCount;

        codeOptimize compCodeOpt; // what type of code optimizations

        bool compUseCMOV;

// optimize maximally and/or favor speed over size?
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000

// Maximum number of locals before turning off the inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512

        bool compMinOpts;
        bool compMinOptsIsSet;
#ifdef DEBUG
        mutable bool compMinOptsIsUsed;

        bool MinOpts() const
        {
            assert(compMinOptsIsSet);
            compMinOptsIsUsed = true;
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#else  // !DEBUG
        bool MinOpts() const
        {
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#endif // !DEBUG

        bool OptimizationDisabled() const
        {
            return MinOpts() || compDbgCode;
        }
        bool OptimizationEnabled() const
        {
            return !OptimizationDisabled();
        }

        void SetMinOpts(bool val)
        {
            assert(!compMinOptsIsUsed);
            assert(!compMinOptsIsSet || (compMinOpts == val));
            compMinOpts      = val;
            compMinOptsIsSet = true;
        }

        // true if the CLFLG_* for an optimization is set.
        bool OptEnabled(unsigned optFlag) const
        {
            return !!(compFlags & optFlag);
        }

#ifdef FEATURE_READYTORUN
        bool IsReadyToRun() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
        }
#else
        bool IsReadyToRun() const
        {
            return false;
        }
#endif

        // Check if the compilation is control-flow guard enabled.
        bool IsCFGEnabled() const
        {
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
            // On these platforms we assume the register that the target is
            // passed in is preserved by the validator and take care to get the
            // target from the register for the call (even in debug mode).
            static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
            if (JitConfig.JitForceControlFlowGuard())
                return true;

            return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
            // The remaining platforms are not supported and would require some
            // work to support.
            //
            // ARM32:
            //   The ARM32 validator does not preserve any volatile registers
            //   which means we have to take special care to allocate and use a
            //   callee-saved register (reloading the target from memory is a
            //   security issue).
            //
            // x86:
            //   On x86 some VSD calls disassemble the call site and expect an
            //   indirect call which is fundamentally incompatible with CFG.
            //   This would require a different way to pass this information
            //   through.
            //
            return false;
#endif
        }

#ifdef FEATURE_ON_STACK_REPLACEMENT
        bool IsOSR() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
        }
#else
        bool IsOSR() const
        {
            return false;
        }
#endif

        // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
        // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
        // the current logic for frame setup initializes and pushes
        // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
        // safely be pushed/popped while the thread is in a preemptive state).
        bool ShouldUsePInvokeHelpers()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
                   jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        // true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
        // prolog/epilog
        bool IsReversePInvoke()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        bool compScopeInfo; // Generate the LocalVar info ?
        bool compDbgCode;   // Generate debugger-friendly code?
        bool compDbgInfo;   // Gather debugging info?
        bool compDbgEnC;

#ifdef PROFILING_SUPPORTED
        bool compNoPInvokeInlineCB;
#else
        static const bool compNoPInvokeInlineCB;
#endif

#ifdef DEBUG
        bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif

#if defined(DEBUG) && defined(TARGET_XARCH)
        bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)
        bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif // defined(DEBUG) && defined(TARGET_X86)

        bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen

#ifdef DEBUG
#if defined(TARGET_XARCH)
        bool compEnablePCRelAddr; // Whether absolute addr be encoded as PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG

#ifdef UNIX_AMD64_ABI
        // This flag is indicating if there is a need to align the frame.
        // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
        // FastTailCall. These slots make the frame size non-zero, so alignment logic will be called.
        // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size
        // of 0. The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering
        // that there are calls and making sure the frame alignment logic is executed.
        bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI

        bool compProcedureSplitting; // Separate cold code from hot code

        bool genFPorder; // Preserve FP order (operations are non-commutative)
        bool genFPopt;   // Can we do frame-pointer-omission optimization?
        bool altJit;     // True if we are an altjit and are compiling this method

#ifdef OPT_CONFIG
        bool optRepeat; // Repeat optimizer phases k times
#endif

#ifdef DEBUG
        bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
        bool dspCode;                  // Display native code generated
        bool dspEHTable;               // Display the EH table reported to the VM
        bool dspDebugInfo;             // Display the Debug info reported to the VM
        bool dspInstrs;                // Display the IL instructions intermixed with the native code output
        bool dspLines;                 // Display source-code lines intermixed with native code output
        bool dmpHex;                   // Display raw bytes in hex of native code output
        bool varNames;                 // Display variables names in native code output
        bool disAsm;                   // Display native code as it is generated
        bool disAsmSpilled;            // Display native code when any register spilling occurs
        bool disasmWithGC;             // Display GC info interleaved with disassembly.
        bool disDiffable;              // Makes the Disassembly code 'diff-able'
        bool disAddr;                  // Display process address next to each instruction in disassembly code
        bool disAlignment;             // Display alignment boundaries in disassembly code
        bool disAsm2;                  // Display native code after it is generated using external disassembler
        bool dspOrder;                 // Display names of each of the methods that we ngen/jit
        bool dspUnwind;                // Display the unwind info output
        bool dspDiffable;     // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as disDiffable)
        bool compLongAddress; // Force using large pseudo instructions for long address
                              // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
        bool dspGCtbls;       // Display the GC tables
#endif

        bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method

        // Default numbers used to perform loop alignment. All the numbers are chosen
        // based on experimenting with various benchmarks.
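        // (For example, taken together the defaults below mean that, under the
        // non-adaptive policy, a loop is aligned at a 32-byte boundary only when
        // the weight of its first block is at least 4 and its code size is at
        // most 3 * 32 = 96 bytes.)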
        // Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4

        // By default a loop will be aligned at 32B address boundary to get better
        // performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20

        // For non-adaptive loop alignment, by default, only align a loop whose size is
        // at most 3 times the alignment block size. If the loop is bigger than that, it is most
        // likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3

#ifdef DEBUG
        // Loop alignment variables

        // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
        bool compJitAlignLoopForJcc;
#endif
        // For non-adaptive alignment, maximum loop size (in bytes) for which alignment will be done.
        unsigned short compJitAlignLoopMaxCodeSize;

        // Minimum weight needed for the first block of a loop to make it a candidate for alignment.
        unsigned short compJitAlignLoopMinBlockWeight;

        // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
        // be done. By default, 32B.
        unsigned short compJitAlignLoopBoundary;

        // Padding limit to align a loop.
        unsigned short compJitAlignPaddingLimit;

        // If set, perform adaptive loop alignment that limits number of padding based on loop size.
        bool compJitAlignLoopAdaptive;

        // If set, tries to hide alignment instructions behind unconditional jumps.
        bool compJitHideAlignBehindJmp;

#ifdef LATE_DISASM
        bool doLateDisasm; // Run the late disassembler
#endif                     // LATE_DISASM

#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
        static const bool dspGCtbls = true;
#endif

#ifdef PROFILING_SUPPORTED
        // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
        // This option helps make the JIT behave as if it is running under a profiler.
        bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED

#if FEATURE_TAILCALL_OPT
        // Whether opportunistic or implicit tail call optimization is enabled.
        bool compTailCallOpt;
        // Whether optimization of transforming a recursive tail call into a loop is enabled.
        bool compTailCallLoopOpt;
#endif

#if FEATURE_FASTTAILCALL
        // Whether fast tail calls are allowed.
        bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL

#if defined(TARGET_ARM64)
        // Decision about whether to save FP/LR registers with callee-saved registers (see
        // COMPlus_JitSaveFpLrWithCalleeSavedRegisters).
        int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)

#ifdef CONFIGURABLE_ARM_ABI
        bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
        static const bool compUseSoftFP = true;
#else  // !ARM_SOFTFP
        static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
    } opts;

    static bool s_pAltJitExcludeAssembliesListInitialized;
    static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;

#ifdef DEBUG
    static bool s_pJitDisasmIncludeAssembliesListInitialized;
    static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;

    static bool       s_pJitFunctionFileInitialized;
    static MethodSet* s_pJitMethodSet;
#endif // DEBUG

#ifdef DEBUG
// silence warning of cast to greater size. It is easier to silence than construct code the compiler is happy with, and
// it is safe in this case
#pragma warning(push)
#pragma warning(disable : 4312)

    template <typename T>
    T dspPtr(T p)
    {
        return (p == ZERO) ? ZERO : (opts.dspDiffable ?
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
    size_t compInfoBlkSize;
    BYTE*  compInfoBlkAddr;

    EHblkDsc* compHndBBtab;           // array of EH data
    unsigned  compHndBBtabCount;      // element count of used elements in EH data array
    unsigned  compHndBBtabAllocCount; // element count of allocated elements in EH data array

#if defined(TARGET_X86)

    //-------------------------------------------------------------------------
    //  Tracking of region covered by the monitor in synchronized methods
    void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER
    void* syncEndEmitCookie;   // the emitter cookie for first instruction after the call to MON_EXIT

#endif // TARGET_X86

    Phases      mostRecentlyActivePhase; // the most recently active phase
    PhaseChecks activePhaseChecks;       // the currently active phase checks

    //-------------------------------------------------------------------------
    //  The following keeps track of how many bytes of local frame space we've
    //  grabbed so far in the current function, and how many argument bytes we
    //  need to pop when we return.
    //

    unsigned compLclFrameSize; // secObject+lclBlk+locals+temps

    // Count of callee-saved regs we pushed in the prolog.
    // Does not include EBP for isFramePointerUsed() and double-aligned frames.
    // In case of Amd64 this doesn't include float regs saved on stack.
    unsigned compCalleeRegsPushed;

#if defined(TARGET_XARCH)
    // Mask of callee saved float regs on stack.
    regMaskTP compCalleeFPRegsSavedMask;
#endif
#ifdef TARGET_AMD64
// Quirk for VS debug-launch scenario to work:
// Bytes of padding between save-reg area and locals.
#define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES)
    unsigned compVSQuirkStackPaddingNeeded;
#endif

    unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg))

    unsigned compMapILargNum(unsigned ILargNum);      // map accounting for hidden args
    unsigned compMapILvarNum(unsigned ILvarNum);      // map accounting for hidden args
    unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args

#if defined(TARGET_ARM64)
    struct FrameInfo
    {
        // Frame type (1-5)
        int frameType;

        // Distance from established (method body) SP to base of callee save area
        int calleeSaveSpOffset;

        // Amount to subtract from SP before saving (prolog) OR
        // to add to SP after restoring (epilog) callee saves
        int calleeSaveSpDelta;

        // Distance from established SP to where caller's FP was saved
        int offsetSpToSavedFp;
    } compFrameInfo;
#endif

    //-------------------------------------------------------------------------

    static void compStartup();  // One-time initialization
    static void compShutdown(); // One-time finalization

    void compInit(ArenaAllocator*       pAlloc,
                  CORINFO_METHOD_HANDLE methodHnd,
                  COMP_HANDLE           compHnd,
                  CORINFO_METHOD_INFO*  methodInfo,
                  InlineInfo*           inlineInfo);
    void compDone();

    static void compDisplayStaticSizes(FILE* fout);

    //------------ Some utility functions --------------

    void* compGetHelperFtn(CorInfoHelpFunc ftnNum,         /* IN  */
                           void**          ppIndirection); /* OUT */

    // Several JIT/EE interface functions return a CorInfoType, and also return a
    // class handle as an out parameter if the type is a value class. Returns the
    // size of the type these describe.
    unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd);

    // Returns true if the method being compiled has a return buffer.
    bool compHasRetBuffArg();

#ifdef DEBUG
    // Components used by the compiler may write unit test suites, and
    // have them run within this method. They will be run only once per process, and only
    // in debug. (Perhaps should be under the control of a COMPlus_ flag.)
    // These should fail by asserting.
    void compDoComponentUnitTestsOnce();
#endif // DEBUG

    int compCompile(CORINFO_MODULE_HANDLE classPtr,
                    void**                methodCodePtr,
                    uint32_t*             methodCodeSize,
                    JitFlags*             compileFlags);
    void compCompileFinish();
    int compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
                          COMP_HANDLE           compHnd,
                          CORINFO_METHOD_INFO*  methodInfo,
                          void**                methodCodePtr,
                          uint32_t*             methodCodeSize,
                          JitFlags*             compileFlag);

    ArenaAllocator* compGetArenaAllocator();

    void generatePatchpointInfo();

#if MEASURE_MEM_ALLOC
    static bool s_dspMemStats; // Display per-phase memory statistics for every function
#endif                         // MEASURE_MEM_ALLOC

#if LOOP_HOIST_STATS
    unsigned m_loopsConsidered;
    bool     m_curLoopHasHoistedExpression;
    unsigned m_loopsWithHoistedExpressions;
    unsigned m_totalHoistedExpressions;

    void AddLoopHoistStats();
    void PrintPerMethodLoopHoistStats();

    static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below.
    static unsigned      s_loopsConsidered;
    static unsigned      s_loopsWithHoistedExpressions;
    static unsigned      s_totalHoistedExpressions;

    static void PrintAggregateLoopHoistStats(FILE* f);
#endif // LOOP_HOIST_STATS

#if TRACK_ENREG_STATS
    class EnregisterStats
    {
    private:
        unsigned m_totalNumberOfVars;
        unsigned m_totalNumberOfStructVars;
        unsigned m_totalNumberOfEnregVars;
        unsigned m_totalNumberOfStructEnregVars;

        unsigned m_addrExposed;
        unsigned m_VMNeedsStackAddr;
        unsigned m_localField;
        unsigned m_blockOp;
        unsigned m_dontEnregStructs;
        unsigned m_notRegSizeStruct;
        unsigned m_structArg;
        unsigned m_lclAddrNode;
        unsigned m_castTakesAddr;
        unsigned m_storeBlkSrc;
        unsigned m_oneAsgRetyping;
        unsigned m_swizzleArg;
        unsigned m_blockOpRet;
        unsigned m_returnSpCheck;
        unsigned m_simdUserForcesDep;
        unsigned m_liveInOutHndlr;
        unsigned m_depField;
        unsigned m_noRegVars;
        unsigned m_minOptsGC;
#ifdef JIT32_GCENCODER
        unsigned m_PinningRef;
#endif // JIT32_GCENCODER
#if !defined(TARGET_64BIT)
        unsigned m_longParamField;
#endif // !TARGET_64BIT
        unsigned m_parentExposed;
        unsigned m_tooConservative;
        unsigned m_escapeAddress;
        unsigned m_osrExposed;
        unsigned m_stressLclFld;
        unsigned m_copyFldByFld;
        unsigned m_dispatchRetBuf;
        unsigned m_wideIndir;

    public:
        void RecordLocal(const LclVarDsc* varDsc);
        void Dump(FILE* fout) const;
    };

    static EnregisterStats s_enregisterStats;
#endif // TRACK_ENREG_STATS

    bool compIsForImportOnly();
    bool compIsForInlining() const;
    bool compDonotInline();

#ifdef DEBUG
    // Get the default fill char value; we randomize this value when JitStress is enabled.
    static unsigned char compGetJitDefaultFill(Compiler* comp);

    const char* compLocalVarName(unsigned varNum, unsigned offs);
    VarName compVarName(regNumber reg, bool isFloatReg = false);
    const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false);
    const char* compRegNameForSize(regNumber reg, size_t size);
    const char* compFPregVarName(unsigned fpReg, bool displayVar = false);
    void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP);
    void compDspSrcLinesByLineNum(unsigned line, bool seek = false);
#endif // DEBUG

    //-------------------------------------------------------------------------

    struct VarScopeListNode
    {
        VarScopeDsc*      data;
        VarScopeListNode* next;
        static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc)
        {
            VarScopeListNode* node = new (alloc) VarScopeListNode;
            node->data             = value;
            node->next             = nullptr;
            return node;
        }
    };

    struct VarScopeMapInfo
    {
        VarScopeListNode* head;
        VarScopeListNode* tail;
        static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc)
        {
            VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
            info->head            = node;
            info->tail            = node;
            return info;
        }
    };

    // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
    static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;

    typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap;

    // Map to keep variables' scope indexed by varNum, containing its scope dscs at the index.
    VarNumToScopeDscMap* compVarScopeMap;

    VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd);

    VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs);

    VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs);

    void compInitVarScopeMap();

    VarScopeDsc** compEnterScopeList; // List has the offsets where variables
                                      // enter scope, sorted by instr offset
    unsigned compNextEnterScope;

    VarScopeDsc** compExitScopeList; // List has the offsets where variables
                                     // go out of scope, sorted by instr offset
    unsigned compNextExitScope;

    void compInitScopeLists();

    void compResetScopeLists();

    VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false);

    VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false);

    void compProcessScopesUntil(unsigned   offset,
                                VARSET_TP* inScope,
                                void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*),
                                void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*));

#ifdef DEBUG
    void compDispScopeLists();
#endif // DEBUG

    bool compIsProfilerHookNeeded();

    //-------------------------------------------------------------------------
    /*               Statistical Data Gathering                               */

    void compJitStats(); // call this function and enable
                         // various ifdef's below for statistical data

#if CALL_ARG_STATS
    void        compCallArgStats();
    static void compDispCallArgStats(FILE* fout);
#endif

    //-------------------------------------------------------------------------

protected:
#ifdef DEBUG
    bool skipMethod();
#endif

    ArenaAllocator* compArenaAllocator;

public:
    void compFunctionTraceStart();
    void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);

protected:
    size_t compMaxUncheckedOffsetForNullObject;

    void compInitOptions(JitFlags* compileFlags);

    void compSetProcessor();
    void compInitDebuggingInfo();
    void compSetOptimizationLevel();
#ifdef TARGET_ARMARCH
    bool compRsvdRegCheck(FrameLayoutState curState);
#endif
    void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags);

    // Clear annotations produced during optimizations; to be used between iterations when repeating opts.
    void ResetOptAnnotations();

    // Regenerate loop descriptors; to be used between iterations when repeating opts.
    void RecomputeLoopInfo();

#ifdef PROFILING_SUPPORTED
    // Data required for generating profiler Enter/Leave/TailCall hooks

    bool  compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method
    void* compProfilerMethHnd;    // Profiler handle of the method being compiled. Passed as param to ELT callbacks
    bool  compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle
#endif

public:
    // Assumes called as part of process shutdown; does any compiler-specific work associated with that.
    static void ProcessShutdownWork(ICorStaticInfo* statInfo);

    CompAllocator getAllocator(CompMemKind cmk = CMK_Generic)
    {
        return CompAllocator(compArenaAllocator, cmk);
    }

    CompAllocator getAllocatorGC()
    {
        return getAllocator(CMK_GC);
    }

    CompAllocator getAllocatorLoopHoist()
    {
        return getAllocator(CMK_LoopHoist);
    }

#ifdef DEBUG
    CompAllocator getAllocatorDebugOnly()
    {
        return getAllocator(CMK_DebugOnly);
    }
#endif // DEBUG

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           typeInfo                                        XX
    XX                                                                           XX
    XX   Checks for type compatibility and merges types                         XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    // Returns true if child is equal to or a subtype of parent for merge purposes.
    // This support is necessary to support attributes that are not described in,
    // for example, signatures. For example, the permanent home byref (byref that
    // points to the gc heap), isn't a property of method signatures, therefore,
    // it is safe to have mismatches here (that tiCompatibleWith will not flag),
    // but when deciding if we need to reimport a block, we need to take these
    // into account.
    bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;

    // Returns true if child is equal to or a subtype of parent.
    // normalisedForStack indicates that both types are normalised for the stack.
    bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const;

    // Merges pDest and pSrc. Returns false if merge is undefined.
    // *pDest is modified to represent the merged type. Sets "*changed" to true
    // if this changes "*pDest".
    bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const;

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                           IL verification stuff                          XX
    XX                                                                           XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */

public:
    // The following is used to track liveness of local variables, initialization
    // of valueclass constructors, and type safe use of IL instructions.

    // dynamic state info needed for verification
    EntryState verCurrentState;

    // this ptr of object type .ctors are considered inited only after
    // the base class ctor is called, or an alternate ctor is called.
    // An uninited this ptr can be used to access fields, but cannot
    // be used to call a member function.
    bool verTrackObjCtorInitState;

    void verInitBBEntryState(BasicBlock* block, EntryState* currentState);

    // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state.
    void verSetThisInit(BasicBlock* block, ThisInitState tis);
    void verInitCurrentState();
    void verResetCurrentState(BasicBlock* block, EntryState* currentState);

    // Merges the current verification state into the entry state of "block", return false if that merge fails,
    // true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block".
    bool verMergeEntryStates(BasicBlock* block, bool* changed);

    void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg));
    void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg));

    typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd,
                             bool bashStructToRef = false); // converts from jit type representation to typeInfo
    typeInfo verMakeTypeInfo(CorInfoType          ciType,
                             CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo
    bool verIsSDArray(const typeInfo& ti);
    typeInfo verGetArrayElemType(const typeInfo& ti);

    typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args);
    bool verIsByRefLike(const typeInfo& ti);
    bool verIsSafeToReturnByRef(const typeInfo& ti);

    // generic type variables range over types that satisfy IsBoxable
    bool verIsBoxable(const typeInfo& ti);

    void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                                       DEBUGARG(unsigned line));
    void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file)
                                             DEBUGARG(unsigned line));

    bool verCheckTailCallConstraint(OPCODE                  opcode,
                                    CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call
                                                                                       // on a type parameter?
                                    bool speculative // If true, won't throw if verification fails. Instead it will
                                                     // return false to the caller.
                                                     // If false, it will throw.
                                    );

    bool verIsBoxedValueType(const typeInfo& ti);

    void verVerifyCall(OPCODE                  opcode,
                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                       bool                    tailCall,
                       bool                    readonlyCall, // is this a "readonly." call?
                       const BYTE*             delegateCreateStart,
                       const BYTE*             codeAddr,
                       CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName));

    bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef);

    typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType);
    typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType);
    void verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
                        const CORINFO_FIELD_INFO& fieldInfo,
                        const typeInfo*           tiThis,
                        bool                      mutator,
                        bool                      allowPlainStructAsThis = false);
    void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode);
    void verVerifyThisPtrInitialised();
    bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target);

#ifdef DEBUG

    // One line log function. Default level is 0. Increasing it gives you
    // more log information

    // levels are currently unused: #define JITDUMP(level,...)                     ();
    void JitLogEE(unsigned level, const char* fmt, ...);

    bool compDebugBreak;

    bool compJitHaltMethod();

#endif

    /*
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XX                                                                           XX
    XX                   GS Security checks for unsafe buffers                   XX
    XX                                                                           XX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    */
public:
    struct ShadowParamVarInfo
    {
        FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other
        unsigned      shadowCopy;  // Lcl var num, if not valid set to BAD_VAR_NUM

        static bool mayNeedShadowCopy(LclVarDsc* varDsc)
        {
#if defined(TARGET_AMD64)
            // GS cookie logic to create shadow slots, create trees to copy reg args to shadow
            // slots and update all trees to refer to shadow slots is done immediately after
            // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines
            // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed
            // in register. Therefore, conservatively all params may need a shadow copy. Note that
            // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before
            // creating a shadow slot even though this routine returns true.
            //
            // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than
            // required. There are two cases under which a reg arg could potentially be used from its
            // home location:
            //   a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates())
            //   b) LSRA spills it
            //
            // Possible solution to address case (a)
            //   - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked
            //     in this routine. Note that 'live out of exception handler' is something we may not be
            //     able to check here since GS cookie logic is invoked ahead of liveness computation.
            //     Therefore, for methods with exception handling that need a GS cookie check we might have
            //     to take the conservative approach.
            //
            // Possible solution to address case (b)
            //   - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we
            //     create a new spill temp if the method needs GS cookie check.
            return varDsc->lvIsParam;
#else // !defined(TARGET_AMD64)
            return varDsc->lvIsParam && !varDsc->lvIsRegArg;
#endif
        }

#ifdef DEBUG
        void Print()
        {
            printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy);
        }
#endif
    };

    GSCookie*           gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks
    GSCookie            gsGlobalSecurityCookieVal;  // Value of global cookie if addr is NULL
    ShadowParamVarInfo* gsShadowVarInfo;            // Table used by shadow param analysis code

    void gsGSChecksInitCookie();   // Grabs cookie variable
    void gsCopyShadowParams();     // Identify vulnerable params and create shadow copies
    bool gsFindVulnerableParams(); // Shadow param analysis code
    void gsParamsToShadows();      // Insert copy code and replace param uses by shadow

    static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk
    static fgWalkPreFn gsReplaceShadowParams;     // Shadow param replacement tree-walk

#define DEFAULT_MAX_INLINE_SIZE 100 // Methods with >  DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined.
                                    // This can be overridden by setting the COMPlus_JITInlineSize env variable.
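// For example, with these defaults a callee of 150 IL bytes is never inlined
// (unless COMPlus_JITInlineSize raises the limit), and an inlining chain more
// than 20 calls deep is cut off by DEFAULT_MAX_INLINE_DEPTH below.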
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined

#define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers

private:
#ifdef FEATURE_JIT_METHOD_PERF
    JitTimer*                  pCompJitTimer;         // Timer data structure (by phases) for current compilation.
    static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run.

    static LPCWSTR JitTimeLogCsv();        // Retrieve the file name for CSV from ConfigDWORD.
    static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to.
#endif
    void BeginPhase(Phases phase); // Indicate the start of the given phase.
    void EndPhase(Phases phase);   // Indicate the end of the given phase.

#if MEASURE_CLRAPI_CALLS
    // Thin wrappers that call into JitTimer (if present).
    inline void CLRApiCallEnter(unsigned apix);
    inline void CLRApiCallLeave(unsigned apix);

public:
    inline void CLR_API_Enter(API_ICorJitInfo_Names ename);
    inline void CLR_API_Leave(API_ICorJitInfo_Names ename);

private:
#endif

#if defined(DEBUG) || defined(INLINE_DATA)
    // These variables are associated with maintaining SQM data about compile time.
    unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase
                                                  // in the current compilation.
    unsigned __int64 m_compCycles;                // Net cycle count for current compilation
    DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of
                                          // the inlining phase in the current compilation.
#endif // defined(DEBUG) || defined(INLINE_DATA)

    // Records the SQM-relevant state (cycles and tick count). Should be called after inlining is complete.
    // (We do this after inlining because this marks the last point at which the JIT is likely to cause
    // type-loading and class initialization).
    void RecordStateAtEndOfInlining();
    // Assumes being called at the end of compilation. Update the SQM state.
    void RecordStateAtEndOfCompilation();

public:
#if FUNC_INFO_LOGGING
    static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the
                                            // filename to write it to.
    static FILE* compJitFuncInfoFile;       // And this is the actual FILE* to write to.
#endif // FUNC_INFO_LOGGING

    Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers.

#if MEASURE_NOWAY
    void RecordNowayAssert(const char* filename, unsigned line, const char* condStr);
#endif // MEASURE_NOWAY

#ifndef FEATURE_TRACELOGGING
    // Should we actually fire the noway assert body and the exception handler?
    bool compShouldThrowOnNoway();
#else  // FEATURE_TRACELOGGING
    // Should we actually fire the noway assert body and the exception handler?
    bool compShouldThrowOnNoway(const char* filename, unsigned line);

    // Telemetry instance to use per method compilation.
    JitTelemetry compJitTelemetry;

    // Get common parameters that have to be logged with most telemetry data.
    void compGetTelemetryDefaults(const char** assemblyName,
                                  const char** scopeName,
                                  const char** methodName,
                                  unsigned*    methodHash);
#endif // !FEATURE_TRACELOGGING

#ifdef DEBUG
private:
    NodeToTestDataMap* m_nodeTestData;

    static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000;
    unsigned              m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we
                                               // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS.
                                               // The current class # is kept here.
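    // Note: the test-data accessors below always route through impInlineRoot(), so an
    // inlinee compiler and its root compiler share a single node-test-data map.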
public:
    NodeToTestDataMap* GetNodeTestData()
    {
        Compiler* compRoot = impInlineRoot();
        if (compRoot->m_nodeTestData == nullptr)
        {
            compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly());
        }
        return compRoot->m_nodeTestData;
    }

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;

    // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
    // currently occur in the AST graph.
    NodeToIntMap* FindReachableNodesInNodeTestData();

    // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated
    // test data, associate that data with "to".
    void TransferTestDataToNode(GenTree* from, GenTree* to);

    // These are the methods that test that the various conditions implied by the
    // test attributes are satisfied.
    void JitTestCheckSSA(); // SSA builder tests.
    void JitTestCheckVN();  // Value numbering tests.
#endif // DEBUG

    // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
    // operations.
    FieldSeqStore* m_fieldSeqStore;

    FieldSeqStore* GetFieldSeqStore()
    {
        Compiler* compRoot = impInlineRoot();
        if (compRoot->m_fieldSeqStore == nullptr)
        {
            // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
            CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
            compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
        }
        return compRoot->m_fieldSeqStore;
    }

    typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;

    // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
    // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
    // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to
    // attach the field sequence directly to the address node.
    NodeToFieldSeqMap* m_zeroOffsetFieldMap;

    NodeToFieldSeqMap* GetZeroOffsetFieldMap()
    {
        // Don't need to worry about inlining here
        if (m_zeroOffsetFieldMap == nullptr)
        {
            // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
            // allocation.
            CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
            m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
        }
        return m_zeroOffsetFieldMap;
    }

    // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in
    // "fieldSeq", whose offsets are all required to be zero. Ensures that any field sequence annotation currently on
    // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has
    // a field sequence as a member; otherwise, it may be the addition of a byref and a constant, where the const
    // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we
    // record the field sequence using the ZeroOffsetFieldMap described above.
    //
    // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR.
    // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in
    // CoreRT. Such a case is handled the same as the default case.
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
    CORINFO_CLASS_HANDLE m_refAnyClass;
    CORINFO_FIELD_HANDLE GetRefanyDataField()
    {
        if (m_refAnyClass == nullptr)
        {
            m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
        }
        return info.compCompHnd->getFieldInClass(m_refAnyClass, 0);
    }
    CORINFO_FIELD_HANDLE GetRefanyTypeField()
    {
        if (m_refAnyClass == nullptr)
        {
            m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
        }
        return info.compCompHnd->getFieldInClass(m_refAnyClass, 1);
    }

#if VARSET_COUNTOPS
    static BitSetSupport::BitSetOpCounter m_varsetOpCounter;
#endif
#if ALLVARSET_COUNTOPS
    static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter;
#endif

    static HelperCallProperties s_helperCallProperties;

#ifdef UNIX_AMD64_ABI
    static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size);
    static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
                                      unsigned                                                   slotNum);

    static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc,
                                    var_types*                                                 type0,
                                    var_types*                                                 type1,
                                    unsigned __int8*                                           offset0,
                                    unsigned __int8*                                           offset1);

    void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd,
                             var_types*           type0,
                             var_types*           type1,
                             unsigned __int8*     offset0,
                             unsigned __int8*     offset1);

#endif // defined(UNIX_AMD64_ABI)

    void fgMorphMultiregStructArgs(GenTreeCall* call);
    GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr);

    bool killGCRefs(GenTree* tree);

}; // end of class Compiler

//---------------------------------------------------------------------------------------------------------------------
// GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern.
//
// This class implements a configurable walker for IR trees. There are five configuration options (default values are
// shown in parentheses):
//
// - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit
//                         of a misnomer, as the first entry will always be the current node.
//
// - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an
//                       argument before visiting the node's operands.
//
// - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an
//                        argument after visiting the node's operands.
//
// - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes.
//                          `DoPreOrder` must be true if this option is true.
//
// - UseExecutionOrder (false): when true, the walker will visit a node's operands in execution order (e.g. if a
//                              binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be
//                              visited before the first).
//
// At least one of `DoPreOrder` and `DoPostOrder` must be specified.
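//
// The walk result returned from a visit controls traversal (see `WalkTree` below): `WALK_ABORT`
// unwinds and stops the entire walk, a pre-order `WALK_SKIP_SUBTREES` skips the current node's
// operands, and `WALK_CONTINUE` proceeds normally. For example, a pre-order visitor that does
// not descend into calls might use:
//
//     Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
//     {
//         return (*use)->IsCall() ? fgWalkResult::WALK_SKIP_SUBTREES : fgWalkResult::WALK_CONTINUE;
//     }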
template <typename TVisitor>
class GenTreeVisitor
{
protected:
    typedef Compiler::fgWalkResult fgWalkResult;

    enum
    {
        ComputeStack      = false,
        DoPreOrder        = false,
        DoPostOrder       = false,
        DoLclVarsOnly     = false,
        UseExecutionOrder = false,
    };

    Compiler*            m_compiler;
    ArrayStack<GenTree*> m_ancestors;

    GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack))
    {
        assert(compiler != nullptr);

        static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder);
        static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder);
    }

    fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
    {
        return fgWalkResult::WALK_CONTINUE;
    }

    fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
    {
        return fgWalkResult::WALK_CONTINUE;
    }

public:
    fgWalkResult WalkTree(GenTree** use, GenTree* user)
    {
        assert(use != nullptr);

        GenTree* node = *use;

        if (TVisitor::ComputeStack)
        {
            m_ancestors.Push(node);
        }

        fgWalkResult result = fgWalkResult::WALK_CONTINUE;
        if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly)
        {
            result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
            if (result == fgWalkResult::WALK_ABORT)
            {
                return result;
            }

            node = *use;
            if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES))
            {
                goto DONE;
            }
        }

        switch (node->OperGet())
        {
            // Leaf lclVars
            case GT_LCL_VAR:
            case GT_LCL_FLD:
            case GT_LCL_VAR_ADDR:
            case GT_LCL_FLD_ADDR:
                if (TVisitor::DoLclVarsOnly)
                {
                    result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                FALLTHROUGH;

            // Leaf nodes
            case GT_CATCH_ARG:
            case GT_LABEL:
            case GT_FTN_ADDR:
            case GT_RET_EXPR:
            case GT_CNS_INT:
            case GT_CNS_LNG:
            case GT_CNS_DBL:
            case GT_CNS_STR:
            case GT_MEMORYBARRIER:
            case GT_JMP:
            case GT_JCC:
            case GT_SETCC:
            case GT_NO_OP:
            case GT_START_NONGC:
            case GT_START_PREEMPTGC:
            case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
            case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
            case GT_PHI_ARG:
            case GT_JMPTABLE:
            case GT_CLS_VAR:
            case GT_CLS_VAR_ADDR:
            case GT_ARGPLACE:
            case GT_PHYSREG:
            case GT_EMITNOP:
            case GT_PINVOKE_PROLOG:
            case GT_PINVOKE_EPILOG:
            case GT_IL_OFFSET:
                break;

            // Lclvar unary operators
            case GT_STORE_LCL_VAR:
            case GT_STORE_LCL_FLD:
                if (TVisitor::DoLclVarsOnly)
                {
                    result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                FALLTHROUGH;

            // Standard unary operators
            case GT_NOT:
            case GT_NEG:
            case GT_BSWAP:
            case GT_BSWAP16:
            case GT_COPY:
            case GT_RELOAD:
            case GT_ARR_LENGTH:
            case GT_CAST:
            case GT_BITCAST:
            case GT_CKFINITE:
            case GT_LCLHEAP:
            case GT_ADDR:
            case GT_IND:
            case GT_OBJ:
            case GT_BLK:
            case GT_BOX:
            case GT_ALLOCOBJ:
            case GT_INIT_VAL:
            case GT_JTRUE:
            case GT_SWITCH:
            case GT_NULLCHECK:
            case GT_PUTARG_REG:
            case GT_PUTARG_STK:
            case GT_PUTARG_TYPE:
            case GT_RETURNTRAP:
            case GT_NOP:
            case GT_FIELD:
            case GT_RETURN:
            case GT_RETFILT:
            case GT_RUNTIMELOOKUP:
            case GT_KEEPALIVE:
            case GT_INC_SATURATE:
            {
                GenTreeUnOp* const unOp = node->AsUnOp();
                if (unOp->gtOp1 != nullptr)
                {
                    result = WalkTree(&unOp->gtOp1, unOp);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                break;
            }

            // Special nodes
            case GT_PHI:
                for (GenTreePhi::Use& use : node->AsPhi()->Uses())
                {
                    result = WalkTree(&use.NodeRef(), node);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                break;

            case GT_FIELD_LIST:
                for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses())
                {
                    result = WalkTree(&use.NodeRef(), node);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                break;

            case GT_CMPXCHG:
            {
                GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg();

                result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                result = WalkTree(&cmpXchg->gtOpValue, cmpXchg);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                break;
            }

            case GT_ARR_ELEM:
            {
                GenTreeArrElem* const arrElem = node->AsArrElem();

                result = WalkTree(&arrElem->gtArrObj, arrElem);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }

                const unsigned rank = arrElem->gtArrRank;
                for (unsigned dim = 0; dim < rank; dim++)
                {
                    result = WalkTree(&arrElem->gtArrInds[dim], arrElem);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                break;
            }

            case GT_ARR_OFFSET:
            {
                GenTreeArrOffs* const arrOffs = node->AsArrOffs();

                result = WalkTree(&arrOffs->gtOffset, arrOffs);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                result = WalkTree(&arrOffs->gtIndex, arrOffs);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                result = WalkTree(&arrOffs->gtArrObj, arrOffs);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                break;
            }

            case GT_STORE_DYN_BLK:
            {
                GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk();

                GenTree** op1Use = &dynBlock->gtOp1;
                GenTree** op2Use = &dynBlock->gtOp2;
                GenTree** op3Use = &dynBlock->gtDynamicSize;

                result = WalkTree(op1Use, dynBlock);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                result = WalkTree(op2Use, dynBlock);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                result = WalkTree(op3Use, dynBlock);
                if (result == fgWalkResult::WALK_ABORT)
                {
                    return result;
                }
                break;
            }

            case GT_CALL:
            {
                GenTreeCall* const call = node->AsCall();

                if (call->gtCallThisArg != nullptr)
                {
                    result = WalkTree(&call->gtCallThisArg->NodeRef(), call);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }

                for (GenTreeCall::Use& use : call->Args())
                {
                    result = WalkTree(&use.NodeRef(), call);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }

                for (GenTreeCall::Use& use : call->LateArgs())
                {
                    result = WalkTree(&use.NodeRef(), call);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }

                if (call->gtCallType == CT_INDIRECT)
                {
                    if (call->gtCallCookie != nullptr)
                    {
                        result = WalkTree(&call->gtCallCookie, call);
                        if (result == fgWalkResult::WALK_ABORT)
                        {
                            return result;
                        }
                    }

                    result = WalkTree(&call->gtCallAddr, call);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }

                if (call->gtControlExpr != nullptr)
                {
                    result = WalkTree(&call->gtControlExpr, call);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }

                break;
            }

#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
            case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
            case GT_HWINTRINSIC:
#endif
                if (TVisitor::UseExecutionOrder && node->IsReverseOp())
                {
                    assert(node->AsMultiOp()->GetOperandCount() == 2);

                    result = WalkTree(&node->AsMultiOp()->Op(2), node);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                    result = WalkTree(&node->AsMultiOp()->Op(1), node);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                else
                {
                    for (GenTree** use : node->AsMultiOp()->UseEdges())
                    {
                        result = WalkTree(use, node);
                        if (result == fgWalkResult::WALK_ABORT)
                        {
                            return result;
                        }
                    }
                }
                break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)

            // Binary nodes
            default:
            {
                assert(node->OperIsBinary());

                GenTreeOp* const op = node->AsOp();

                GenTree** op1Use = &op->gtOp1;
                GenTree** op2Use = &op->gtOp2;

                if (TVisitor::UseExecutionOrder && node->IsReverseOp())
                {
                    std::swap(op1Use, op2Use);
                }

                if (*op1Use != nullptr)
                {
                    result = WalkTree(op1Use, op);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }

                if (*op2Use != nullptr)
                {
                    result = WalkTree(op2Use, op);
                    if (result == fgWalkResult::WALK_ABORT)
                    {
                        return result;
                    }
                }
                break;
            }
        }

    DONE:
        // Finally, visit the current node
        if (TVisitor::DoPostOrder)
        {
            result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user);
        }

        if (TVisitor::ComputeStack)
        {
            m_ancestors.Pop();
        }

        return result;
    }
};

template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder>
class GenericTreeWalker final
    : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>
{
public:
    enum
    {
        ComputeStack      = computeStack,
        DoPreOrder        = doPreOrder,
        DoPostOrder       = doPostOrder,
        DoLclVarsOnly     = doLclVarsOnly,
        UseExecutionOrder = useExecutionOrder,
    };

private:
    Compiler::fgWalkData* m_walkData;

public:
    GenericTreeWalker(Compiler::fgWalkData* walkData)
        : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>(
              walkData->compiler)
        , m_walkData(walkData)
    {
        assert(walkData != nullptr);

        if (computeStack)
        {
            walkData->parentStack = &this->m_ancestors;
        }
    }

    Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
    {
        m_walkData->parent = user;
        return m_walkData->wtprVisitorFn(use, m_walkData);
    }

    Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
    {
        m_walkData->parent = user;
        return m_walkData->wtpoVisitorFn(use, m_walkData);
    }
};

// A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor.
template <typename TVisitor>
class DomTreeVisitor
{
protected:
    Compiler* const    m_compiler;
    DomTreeNode* const m_domTree;

    DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree)
    {
    }

    void Begin()
    {
    }

    void PreOrderVisit(BasicBlock* block)
    {
    }

    void PostOrderVisit(BasicBlock* block)
    {
    }

    void End()
    {
    }

public:
    //------------------------------------------------------------------------
    // WalkTree: Walk the dominator tree, starting from fgFirstBB.
    //
    // Notes:
    //    This performs a non-recursive, non-allocating walk of the tree by using
    //    DomTreeNode's firstChild and nextSibling links to locate the children of
    //    a node and BasicBlock's bbIDom parent link to go back up the tree when
    //    no more children are left.
    //
    //    Forests are also supported, provided that all the roots are chained via
    //    DomTreeNode::nextSibling to fgFirstBB.
    //
    void WalkTree()
    {
        static_cast<TVisitor*>(this)->Begin();

        for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next)
        {
            static_cast<TVisitor*>(this)->PreOrderVisit(block);

            next = m_domTree[block->bbNum].firstChild;
            if (next != nullptr)
            {
                assert(next->bbIDom == block);
                continue;
            }

            do
            {
                static_cast<TVisitor*>(this)->PostOrderVisit(block);

                next = m_domTree[block->bbNum].nextSibling;
                if (next != nullptr)
                {
                    assert(next->bbIDom == block->bbIDom);
                    break;
                }

                block = block->bbIDom;
            } while (block != nullptr);
        }

        static_cast<TVisitor*>(this)->End();
    }
};
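// For illustration only (hypothetical, not part of the JIT): a DomTreeVisitor
// subclass that counts the blocks reached by the walk might look like the
// following, given a DomTreeNode* array `domTree` obtained from the compiler's
// dominator computation:
//
//     class BlockCountingDomTreeVisitor final : public DomTreeVisitor<BlockCountingDomTreeVisitor>
//     {
//     public:
//         unsigned m_count;
//
//         BlockCountingDomTreeVisitor(Compiler* compiler, DomTreeNode* domTree)
//             : DomTreeVisitor<BlockCountingDomTreeVisitor>(compiler, domTree), m_count(0)
//         {
//         }
//
//         void PreOrderVisit(BasicBlock* block)
//         {
//             m_count++;
//         }
//     };
//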
// EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.:
//    for (EHblkDsc* const ehDsc : EHClauses(compiler))
//
class EHClauses
{
    EHblkDsc* m_begin;
    EHblkDsc* m_end;

    // Forward iterator for the exception handling table entries. Iteration is in table order.
    //
    class iterator
    {
        EHblkDsc* m_ehDsc;

    public:
        iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
        {
        }

        EHblkDsc* operator*() const
        {
            return m_ehDsc;
        }

        iterator& operator++()
        {
            ++m_ehDsc;
            return *this;
        }

        bool operator!=(const iterator& i) const
        {
            return m_ehDsc != i.m_ehDsc;
        }
    };

public:
    EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
    {
        assert((m_begin != nullptr) || (m_begin == m_end));
    }

    iterator begin() const
    {
        return iterator(m_begin);
    }

    iterator end() const
    {
        return iterator(m_end);
    }
};

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                   Miscellaneous Compiler stuff                            XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

// Values used to mark the types a stack slot is used for

const unsigned TYPE_REF_INT      = 0x01; // slot used as a 32-bit int
const unsigned TYPE_REF_LNG      = 0x02; // slot used as a 64-bit long
const unsigned TYPE_REF_FLT      = 0x04; // slot used as a 32-bit float
const unsigned TYPE_REF_DBL      = 0x08; // slot used as a 64-bit float
const unsigned TYPE_REF_PTR      = 0x10; // slot used as a 32-bit pointer
const unsigned TYPE_REF_BYR      = 0x20; // slot used as a byref pointer
const unsigned TYPE_REF_STC      = 0x40; // slot used as a struct
const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type

// const unsigned TYPE_REF_ADDR_TAKEN  = 0x80; // slots address was taken

/*****************************************************************************
 *
 *  Variables to keep track of total code amounts.
 */

#if DISPLAY_SIZES

extern size_t grossVMsize;
extern size_t grossNCsize;
extern size_t totalNCsize;

extern unsigned genMethodICnt;
extern unsigned genMethodNCnt;
extern size_t   gcHeaderISize;
extern size_t   gcPtrMapISize;
extern size_t   gcHeaderNSize;
extern size_t   gcPtrMapNSize;

#endif // DISPLAY_SIZES

/*****************************************************************************
 *
 *  Variables to keep track of basic block counts (more data on 1 BB methods)
 */

#if COUNT_BASIC_BLOCKS
extern Histogram bbCntTable;
extern Histogram bbOneBBSizeTable;
#endif

/*****************************************************************************
 *
 *  Used by optFindNaturalLoops to gather statistical information such as
 *   - total number of natural loops
 *   - number of loops with 1, 2, ... exit conditions
 *   - number of loops that have an iterator (for like)
 *   - number of loops that have a constant iterator
 */

#if COUNT_LOOPS

extern unsigned totalLoopMethods;        // counts the total number of methods that have natural loops
extern unsigned maxLoopsPerMethod;       // counts the maximum number of loops a method has
extern unsigned totalLoopOverflows;      // # of methods that identified more loops than we can represent
extern unsigned totalLoopCount;          // counts the total number of natural loops
extern unsigned totalUnnatLoopCount;     // counts the total number of (not-necessarily natural) loops
extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent
extern unsigned iterLoopCount;           // counts the # of loops with an iterator (for like)
extern unsigned simpleTestLoopCount;     // counts the # of loops with an iterator and a simple loop condition (iter <
                                         // const)
extern unsigned constIterLoopCount;      // counts the # of loops with a constant iterator (for like)
extern bool     hasMethodLoops;          // flag to keep track if we already counted a method as having loops
extern unsigned loopsThisMethod;         // counts the number of loops in the current method
extern bool     loopOverflowThisMethod;  // True if we exceeded the max # of loops in the method.
extern Histogram loopCountTable;         // Histogram of loop counts
extern Histogram loopExitCountTable;     // Histogram of loop exit counts

#endif // COUNT_LOOPS

/*****************************************************************************
 * variables to keep track of how many iterations we go in a dataflow pass
 */

#if DATAFLOW_ITER

extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow
extern unsigned CFiterCount;  // counts the # of iteration for the Const Folding dataflow

#endif // DATAFLOW_ITER

#if MEASURE_BLOCK_SIZE
extern size_t genFlowNodeSize;
extern size_t genFlowNodeCnt;
#endif // MEASURE_BLOCK_SIZE

#if MEASURE_NODE_SIZE
struct NodeSizeStats
{
    void Init()
    {
        genTreeNodeCnt        = 0;
        genTreeNodeSize       = 0;
        genTreeNodeActualSize = 0;
    }

    // Count of tree nodes allocated.
    unsigned __int64 genTreeNodeCnt;

    // The size we allocate.
    unsigned __int64 genTreeNodeSize;

    // The actual size of the node. Note that the actual size will likely be smaller
    // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change
    // a smaller node to a larger one. TODO-Cleanup: add stats on
    // SetOper()/ChangeOper() usage to quantify this.
    unsigned __int64 genTreeNodeActualSize;
};
extern NodeSizeStats genNodeSizeStats;        // Total node size stats
extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats
extern Histogram     genTreeNcntHist;
extern Histogram     genTreeNsizHist;
#endif // MEASURE_NODE_SIZE

/*****************************************************************************
 *  Count fatal errors (including noway_asserts).
 */

#if MEASURE_FATAL
extern unsigned fatal_badCode;
extern unsigned fatal_noWay;
extern unsigned fatal_implLimitation;
extern unsigned fatal_NOMEM;
extern unsigned fatal_noWayAssertBody;
#ifdef DEBUG
extern unsigned fatal_noWayAssertBodyArgs;
#endif // DEBUG
extern unsigned fatal_NYI;
#endif // MEASURE_FATAL

/*****************************************************************************
 * Codegen
 */

#ifdef TARGET_XARCH

const instruction INS_SHIFT_LEFT_LOGICAL  = INS_shl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr;
const instruction INS_SHIFT_RIGHT_ARITHM  = INS_sar;

const instruction INS_AND             = INS_and;
const instruction INS_OR              = INS_or;
const instruction INS_XOR             = INS_xor;
const instruction INS_NEG             = INS_neg;
const instruction INS_TEST            = INS_test;
const instruction INS_MUL             = INS_imul;
const instruction INS_SIGNED_DIVIDE   = INS_idiv;
const instruction INS_UNSIGNED_DIVIDE = INS_div;
const instruction INS_BREAKPOINT      = INS_int3;
const instruction INS_ADDC            = INS_adc;
const instruction INS_SUBC            = INS_sbb;
const instruction INS_NOT             = INS_not;

#endif // TARGET_XARCH

#ifdef TARGET_ARM

const instruction INS_SHIFT_LEFT_LOGICAL  = INS_lsl;
const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr;
const instruction INS_SHIFT_RIGHT_ARITHM  = INS_asr;

const instruction INS_AND             = INS_and;
const instruction INS_OR              = INS_orr;
const instruction INS_XOR             = INS_eor;
const instruction INS_NEG             = INS_rsb;
const instruction INS_TEST            = INS_tst;
const instruction INS_MUL             = INS_mul;
const instruction INS_MULADD          = INS_mla;
const instruction INS_SIGNED_DIVIDE   = INS_sdiv;
const instruction INS_UNSIGNED_DIVIDE = INS_udiv;
const instruction INS_BREAKPOINT      = INS_bkpt;
const instruction INS_ADDC            = INS_adc;
const instruction INS_SUBC            = INS_sbc;
const instruction INS_NOT             = INS_mvn;

const instruction INS_ABS  = INS_vabs;
const instruction INS_SQRT = INS_vsqrt;

#endif // TARGET_ARM

#ifdef TARGET_ARM64

const instruction INS_MULADD = INS_madd;
inline const instruction INS_BREAKPOINT_osHelper()
{
    // GDB needs the encoding of brk #0
    // Windbg needs the encoding of brk #F000
    return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows;
}
#define INS_BREAKPOINT INS_BREAKPOINT_osHelper()

const instruction INS_ABS  = INS_fabs;
const instruction INS_SQRT = INS_fsqrt;

#endif // TARGET_ARM64

/*****************************************************************************/

extern const BYTE genTypeSizes[];
extern const BYTE genTypeAlignments[];
extern const BYTE genTypeStSzs[];
extern const BYTE genActualTypes[];

/*****************************************************************************/

#ifdef DEBUG
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars);
#endif // DEBUG

#include "compiler.hpp" // All the shared inline functions

/*****************************************************************************/
#endif //_COMPILER_H_
/*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Compiler                                        XX
XX                                                                           XX
XX  Represents the method data we are currently JIT-compiling.               XX
XX  An instance of this class is created for every method we JIT.            XX
XX  This contains all the info needed for the method. So allocating a        XX
XX  new instance per method makes it thread-safe.                            XX
XX  It should be used to do all the memory management for the compiler run.  XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

/*****************************************************************************/
#ifndef _COMPILER_H_
#define _COMPILER_H_
/*****************************************************************************/

#include "jit.h"
#include "opcode.h"
#include "varset.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "gentree.h"
#include "debuginfo.h"
#include "lir.h"
#include "block.h"
#include "inline.h"
#include "jiteh.h"
#include "instr.h"
#include "regalloc.h"
#include "sm.h"
#include "cycletimer.h"
#include "blockset.h"
#include "arraystack.h"
#include "hashbv.h"
#include "jitexpandarray.h"
#include "tinyarray.h"
#include "valuenum.h"
#include "jittelemetry.h"
#include "namedintrinsiclist.h"
#ifdef LATE_DISASM
#include "disasm.h"
#endif

#include "codegeninterface.h"
#include "regset.h"
#include "jitgcinfo.h"

#if DUMP_GC_TABLES && defined(JIT32_GCENCODER)
#include "gcdump.h"
#endif

#include "emit.h"

#include "hwintrinsic.h"
#include "simd.h"
#include "simdashwintrinsic.h"

// This is only used locally in the JIT to indicate that
// a verification block should be inserted
#define SEH_VERIFICATION_EXCEPTION 0xe0564552 // VER

/*****************************************************************************
 *                  Forward declarations
 */

struct InfoHdr;            // defined in GCInfo.h
struct escapeMapping_t;    // defined in fgdiagnostic.cpp
class emitter;             // defined in emit.h
struct ShadowParamVarInfo; // defined in GSChecks.cpp
struct InitVarDscInfo;     // defined in register_arg_convention.h
class FgStack;             // defined in fgbasic.cpp
class Instrumentor;        // defined in fgprofile.cpp
class SpanningTreeVisitor; // defined in fgprofile.cpp
class CSE_DataFlow;        // defined in OptCSE.cpp
class OptBoolsDsc;         // defined in optimizer.cpp
#ifdef DEBUG
struct IndentStack;
#endif

class Lowering; // defined in lower.h

// The following are defined in this file, Compiler.h

class Compiler;

/*****************************************************************************
 *                  Unwind info
 */

#include "unwind.h"

/*****************************************************************************/

//
// Declare global operator new overloads that use the compiler's arena allocator
//

// I wanted to make the second argument optional, with default = CMK_Unknown, but that
// caused these to be ambiguous with the global placement new operators.
void* __cdecl operator new(size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new[](size_t n, Compiler* context, CompMemKind cmk);
void* __cdecl operator new(size_t n, void* p, const jitstd::placement_t& syntax_difference);

// Requires the definitions of "operator new" so including "LoopCloning.h" after the definitions.
#include "loopcloning.h" /*****************************************************************************/ /* This is included here and not earlier as it needs the definition of "CSE" * which is defined in the section above */ /*****************************************************************************/ unsigned genLog2(unsigned value); unsigned genLog2(unsigned __int64 value); unsigned ReinterpretHexAsDecimal(unsigned in); /*****************************************************************************/ const unsigned FLG_CCTOR = (CORINFO_FLG_CONSTRUCTOR | CORINFO_FLG_STATIC); #ifdef DEBUG const int BAD_STK_OFFS = 0xBAADF00D; // for LclVarDsc::lvStkOffs #endif //------------------------------------------------------------------------ // HFA info shared by LclVarDsc and fgArgTabEntry //------------------------------------------------------------------------ inline bool IsHfa(CorInfoHFAElemType kind) { return kind != CORINFO_HFA_ELEM_NONE; } inline var_types HfaTypeFromElemKind(CorInfoHFAElemType kind) { switch (kind) { case CORINFO_HFA_ELEM_FLOAT: return TYP_FLOAT; case CORINFO_HFA_ELEM_DOUBLE: return TYP_DOUBLE; #ifdef FEATURE_SIMD case CORINFO_HFA_ELEM_VECTOR64: return TYP_SIMD8; case CORINFO_HFA_ELEM_VECTOR128: return TYP_SIMD16; #endif case CORINFO_HFA_ELEM_NONE: return TYP_UNDEF; default: assert(!"Invalid HfaElemKind"); return TYP_UNDEF; } } inline CorInfoHFAElemType HfaElemKindFromType(var_types type) { switch (type) { case TYP_FLOAT: return CORINFO_HFA_ELEM_FLOAT; case TYP_DOUBLE: return CORINFO_HFA_ELEM_DOUBLE; #ifdef FEATURE_SIMD case TYP_SIMD8: return CORINFO_HFA_ELEM_VECTOR64; case TYP_SIMD16: return CORINFO_HFA_ELEM_VECTOR128; #endif case TYP_UNDEF: return CORINFO_HFA_ELEM_NONE; default: assert(!"Invalid HFA Type"); return CORINFO_HFA_ELEM_NONE; } } // The following holds the Local var info (scope information) typedef const char* VarName; // Actual ASCII string struct VarScopeDsc { unsigned vsdVarNum; // (remapped) LclVarDsc number unsigned vsdLVnum; // 'which' in eeGetLVinfo(). // Also, it is the index of this entry in the info.compVarScopes array, // which is useful since the array is also accessed via the // compEnterScopeList and compExitScopeList sorted arrays. IL_OFFSET vsdLifeBeg; // instr offset of beg of life IL_OFFSET vsdLifeEnd; // instr offset of end of life #ifdef DEBUG VarName vsdName; // name of the var #endif }; // This class stores information associated with a LclVar SSA definition. class LclSsaVarDsc { // The basic block where the definition occurs. Definitions of uninitialized variables // are considered to occur at the start of the first basic block (fgFirstBB). // // TODO-Cleanup: In the case of uninitialized variables the block is set to nullptr by // SsaBuilder and changed to fgFirstBB during value numbering. It would be useful to // investigate and perhaps eliminate this rather unexpected behavior. BasicBlock* m_block; // The GT_ASG node that generates the definition, or nullptr for definitions // of uninitialized variables. 
    GenTreeOp* m_asg;

public:
    LclSsaVarDsc() : m_block(nullptr), m_asg(nullptr)
    {
    }

    LclSsaVarDsc(BasicBlock* block, GenTreeOp* asg) : m_block(block), m_asg(asg)
    {
        assert((asg == nullptr) || asg->OperIs(GT_ASG));
    }

    BasicBlock* GetBlock() const
    {
        return m_block;
    }

    void SetBlock(BasicBlock* block)
    {
        m_block = block;
    }

    GenTreeOp* GetAssignment() const
    {
        return m_asg;
    }

    void SetAssignment(GenTreeOp* asg)
    {
        assert((asg == nullptr) || asg->OperIs(GT_ASG));
        m_asg = asg;
    }

    ValueNumPair m_vnPair;
};

// This class stores information associated with a memory SSA definition.
class SsaMemDef
{
public:
    ValueNumPair m_vnPair;
};

//------------------------------------------------------------------------
// SsaDefArray: A resizable array of SSA definitions.
//
// Unlike an ordinary resizable array implementation, this allows only element
// addition (by calling AllocSsaNum) and has special handling for RESERVED_SSA_NUM
// (basically it's a 1-based array). The array doesn't impose any particular
// requirements on the elements it stores and AllocSsaNum forwards its arguments
// to the array element constructor; this way the array supports both LclSsaVarDsc
// and SsaMemDef elements.
//
template <typename T>
class SsaDefArray
{
    T*       m_array;
    unsigned m_arraySize;
    unsigned m_count;

    static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0);
    static_assert_no_msg(SsaConfig::FIRST_SSA_NUM == 1);

    // Get the minimum valid SSA number.
    unsigned GetMinSsaNum() const
    {
        return SsaConfig::FIRST_SSA_NUM;
    }

    // Increase (double) the size of the array.
    void GrowArray(CompAllocator alloc)
    {
        unsigned oldSize = m_arraySize;
        unsigned newSize = max(2, oldSize * 2);

        T* newArray = alloc.allocate<T>(newSize);

        for (unsigned i = 0; i < oldSize; i++)
        {
            newArray[i] = m_array[i];
        }

        m_array     = newArray;
        m_arraySize = newSize;
    }

public:
    // Construct an empty SsaDefArray.
    SsaDefArray() : m_array(nullptr), m_arraySize(0), m_count(0)
    {
    }

    // Reset the array (used only if the SSA form is reconstructed).
    void Reset()
    {
        m_count = 0;
    }

    // Allocate a new SSA number (starting with SsaConfig::FIRST_SSA_NUM).
    template <class... Args>
    unsigned AllocSsaNum(CompAllocator alloc, Args&&... args)
    {
        if (m_count == m_arraySize)
        {
            GrowArray(alloc);
        }

        unsigned ssaNum    = GetMinSsaNum() + m_count;
        m_array[m_count++] = T(std::forward<Args>(args)...);

        // Ensure that the first SSA number we allocate is SsaConfig::FIRST_SSA_NUM
        assert((ssaNum == SsaConfig::FIRST_SSA_NUM) || (m_count > 1));
        return ssaNum;
    }

    // Get the number of SSA definitions in the array.
    unsigned GetCount() const
    {
        return m_count;
    }

    // Get a pointer to the SSA definition at the specified index.
    T* GetSsaDefByIndex(unsigned index)
    {
        assert(index < m_count);
        return &m_array[index];
    }

    // Check if the specified SSA number is valid.
    bool IsValidSsaNum(unsigned ssaNum) const
    {
        return (GetMinSsaNum() <= ssaNum) && (ssaNum < (GetMinSsaNum() + m_count));
    }

    // Get a pointer to the SSA definition associated with the specified SSA number.
    T* GetSsaDef(unsigned ssaNum)
    {
        assert(ssaNum != SsaConfig::RESERVED_SSA_NUM);
        return GetSsaDefByIndex(ssaNum - GetMinSsaNum());
    }

    // Get an SSA number associated with the specified SSA def (that must be in this array).
    unsigned GetSsaNum(T* ssaDef)
    {
        assert((m_array <= ssaDef) && (ssaDef < &m_array[m_count]));
        return GetMinSsaNum() + static_cast<unsigned>(ssaDef - &m_array[0]);
    }
};
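// For illustration only (hypothetical, not part of the JIT): typical use of the
// 1-based SsaDefArray is to allocate a number for each definition and later look
// the definition back up by that number, e.g. with some CompAllocator `alloc`:
//
//     SsaDefArray<SsaMemDef> defs;
//     unsigned   ssaNum = defs.AllocSsaNum(alloc); // first call returns SsaConfig::FIRST_SSA_NUM (1)
//     SsaMemDef* def    = defs.GetSsaDef(ssaNum);  // RESERVED_SSA_NUM (0) is never a valid key
//     assert(defs.GetSsaNum(def) == ssaNum);
//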
enum RefCountState
{
    RCS_INVALID, // not valid to get/set ref counts
    RCS_EARLY,   // early counts for struct promotion and struct passing
    RCS_NORMAL,  // normal ref counts (from lvaMarkRefs onward)
};

#ifdef DEBUG
// Reasons why we can't enregister a local.
enum class DoNotEnregisterReason
{
    None,
    AddrExposed,      // the address of this local is exposed.
    DontEnregStructs, // struct enregistration is disabled.
    NotRegSizeStruct, // the struct size does not match any register size, usually the struct size is too big.
    LocalField,       // the local is accessed with LCL_FLD, note we can do it not only for struct locals.
    VMNeedsStackAddr,
    LiveInOutOfHandler, // the local is alive in and out of exception handler and not single def.
    BlockOp,            // Is read or written via a block operation.
    IsStructArg,        // Is a struct passed as an argument in a way that requires a stack location.
    DepField,           // It is a field of a dependently promoted struct
    NoRegVars,          // opts.compFlags & CLFLG_REGVAR is not set
    MinOptsGC,          // It is a GC Ref and we are compiling MinOpts
#if !defined(TARGET_64BIT)
    LongParamField, // It is a decomposed field of a long parameter.
#endif
#ifdef JIT32_GCENCODER
    PinningRef,
#endif
    LclAddrNode, // the local is accessed with LCL_ADDR_VAR/FLD.
    CastTakesAddr,
    StoreBlkSrc,      // the local is used as STORE_BLK source.
    OneAsgRetyping,   // fgMorphOneAsgBlockOp prevents this local from being enregistered.
    SwizzleArg,       // the local is passed using LCL_FLD as another type.
    BlockOpRet,       // the struct is returned and it promoted or there is a cast.
    ReturnSpCheck,    // the local is used to do SP check
    SimdUserForcesDep // a promoted struct was used by a SIMD/HWI node; it must be dependently promoted
};

enum class AddressExposedReason
{
    NONE,
    PARENT_EXPOSED,   // This is a promoted field but the parent is exposed.
    TOO_CONSERVATIVE, // Were marked as exposed to be conservative, fix these places.
    ESCAPE_ADDRESS,   // The address is escaping, for example, passed as call argument.
    WIDE_INDIR,       // We access via indirection with wider type.
    OSR_EXPOSED,      // It was exposed in the original method, osr has to repeat it.
    STRESS_LCL_FLD,   // Stress mode replaces localVar with localFld and makes them addrExposed.
    COPY_FLD_BY_FLD,  // Field by field copy takes the address of the local, can be fixed.
    DISPATCH_RET_BUF  // Caller return buffer dispatch.
};

#endif // DEBUG

class LclVarDsc
{
public:
    // The constructor. Most things can just be zero'ed.
    //
    // Initialize the ArgRegs to REG_STK.
    // Morph will update if this local is passed in a register.
    LclVarDsc()
        : _lvArgReg(REG_STK)
        ,
#if FEATURE_MULTIREG_ARGS
        _lvOtherArgReg(REG_STK)
        ,
#endif // FEATURE_MULTIREG_ARGS
        lvClassHnd(NO_CLASS_HANDLE)
        , lvRefBlks(BlockSetOps::UninitVal())
        , lvPerSsaData()
    {
    }

    // note this only packs because var_types is a typedef of unsigned char
    var_types lvType : 5; // TYP_INT/LONG/FLOAT/DOUBLE/REF

    unsigned char lvIsParam : 1;           // is this a parameter?
    unsigned char lvIsRegArg : 1;          // is this an argument that was passed by register?
    unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)

    unsigned char lvOnFrame : 1;  // (part of) the variable lives on the frame
    unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
                                  // variable is in the same register for the entire function.
    unsigned char lvTracked : 1; // is this a tracked variable?
    bool          lvTrackedNonStruct()
    {
        return lvTracked && lvType != TYP_STRUCT;
    }
    unsigned char lvPinned : 1; // is this a pinned variable?

    unsigned char lvMustInit : 1; // must be initialized

private:
    bool m_addrExposed : 1; // The address of this variable is "exposed" -- passed as an argument, stored in a
                            // global location, etc.
                            // We cannot reason reliably about the value of the variable.

public:
    unsigned char lvDoNotEnregister : 1; // Do not enregister this variable.
    unsigned char lvFieldAccessed : 1;   // The var is a struct local, and a field of the variable is accessed. Affects
                                         // struct promotion.
    unsigned char lvLiveInOutOfHndlr : 1; // The variable is live in or out of an exception handler, and therefore must
                                          // be on the stack (at least at those boundaries.)

    unsigned char lvInSsa : 1;       // The variable is in SSA form (set by SsaBuilder)
    unsigned char lvIsCSE : 1;       // Indicates if this LclVar is a CSE variable.
    unsigned char lvHasLdAddrOp : 1; // has ldloca or ldarga opcode on this local.
    unsigned char lvStackByref : 1;  // This is a compiler temporary of TYP_BYREF that is known to point into our local
                                     // stack frame.

    unsigned char lvHasILStoreOp : 1;         // there is at least one STLOC or STARG on this local
    unsigned char lvHasMultipleILStoreOp : 1; // there is more than one STLOC on this local

    unsigned char lvIsTemp : 1; // Short-lifetime compiler temp

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    unsigned char lvIsImplicitByRef : 1; // Set if the argument is an implicit byref.
#endif                                   // defined(TARGET_AMD64) || defined(TARGET_ARM64)

    unsigned char lvIsBoolean : 1; // set if variable is boolean
    unsigned char lvSingleDef : 1; // variable has a single def
                                   // before lvaMarkLocalVars: identifies ref type locals that can get type updates
                                   // after lvaMarkLocalVars: identifies locals that are suitable for optAddCopies

    unsigned char lvSingleDefRegCandidate : 1; // variable has a single def and hence is a register candidate
                                               // Currently, this is only used to decide if an EH variable can be
                                               // a register candidate or not.

    unsigned char lvDisqualifySingleDefRegCandidate : 1; // tracks variables that are disqualified from register
                                                         // candidacy

    unsigned char lvSpillAtSingleDef : 1; // variable has a single def (as determined by LSRA interval scan)
                                          // and is spilled making it candidate to spill right after the
                                          // first (and only) definition.
                                          // Note: We cannot reuse lvSingleDefRegCandidate because it is set
                                          // in earlier phase and the information might not be appropriate
                                          // in LSRA.

    unsigned char lvDisqualify : 1;   // variable is no longer OK for add copy optimization
    unsigned char lvVolatileHint : 1; // hint for AssertionProp

#ifndef TARGET_64BIT
    unsigned char lvStructDoubleAlign : 1; // Must we double align this struct?
#endif                                     // !TARGET_64BIT
#ifdef TARGET_64BIT
    unsigned char lvQuirkToLong : 1; // Quirk to allocate this LclVar as a 64-bit long
#endif
#ifdef DEBUG
    unsigned char lvKeepType : 1;       // Don't change the type of this variable
    unsigned char lvNoLclFldStress : 1; // Can't apply local field stress on this one
#endif
    unsigned char lvIsPtr : 1; // Might this be used in an address computation? (used by buffer overflow security
                               // checks)
    unsigned char lvIsUnsafeBuffer : 1; // Does this contain an unsafe buffer requiring buffer overflow security checks?
    unsigned char lvPromoted : 1; // True when this local is a promoted struct, a normed struct, or a "split" long on a
                                  // 32-bit target. For implicit byref parameters, this gets hijacked between
                                  // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to indicate whether
                                  // references to the arg are being rewritten as references to a promoted shadow local.
    unsigned char lvIsStructField : 1;     // Is this local var a field of a promoted struct local?
    unsigned char lvOverlappingFields : 1; // True when we have a struct with possibly overlapping fields
    unsigned char lvContainsHoles : 1;     // True when we have a promoted struct that contains holes
    unsigned char lvCustomLayout : 1;      // True when this struct has "CustomLayout"

    unsigned char lvIsMultiRegArg : 1; // true if this is a multireg LclVar struct used in an argument context
    unsigned char lvIsMultiRegRet : 1; // true if this is a multireg LclVar struct assigned from a multireg call

#ifdef FEATURE_HFA_FIELDS_PRESENT
    CorInfoHFAElemType _lvHfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif                                     // FEATURE_HFA_FIELDS_PRESENT

#ifdef DEBUG
    // TODO-Cleanup: See the note on lvSize() - this flag is only in use by asserts that are checking for struct
    // types, and is needed because of cases where TYP_STRUCT is bashed to an integral type.
    // Consider cleaning this up so this workaround is not required.
    unsigned char lvUnusedStruct : 1; // All references to this promoted struct are through its field locals.
                                      // I.e. there is no longer any reference to the struct directly.
                                      // In this case we can simply remove this struct local.

    unsigned char lvUndoneStructPromotion : 1; // The struct promotion was undone and hence there should be no
                                               // reference to the fields of this struct.
#endif

    unsigned char lvLRACandidate : 1; // Tracked for linear scan register allocation purposes

#ifdef FEATURE_SIMD
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
    // type of an arg node is TYP_BYREF and a local node is TYP_SIMD*.
    unsigned char lvSIMDType : 1;            // This is a SIMD struct
    unsigned char lvUsedInSIMDIntrinsic : 1; // This tells whether the lclvar is used in a SIMD intrinsic
    unsigned char lvSimdBaseJitType : 5;     // Note: this only packs because CorInfoType has less than 32 entries

    CorInfoType GetSimdBaseJitType() const
    {
        return (CorInfoType)lvSimdBaseJitType;
    }

    void SetSimdBaseJitType(CorInfoType simdBaseJitType)
    {
        assert(simdBaseJitType < (1 << 5));
        lvSimdBaseJitType = (unsigned char)simdBaseJitType;
    }

    var_types GetSimdBaseType() const;
#endif // FEATURE_SIMD

    unsigned char lvRegStruct : 1; // This is a reg-sized non-field-addressed struct.

    unsigned char lvClassIsExact : 1; // lvClassHandle is the exact type

#ifdef DEBUG
    unsigned char lvClassInfoUpdated : 1; // true if this var has updated class handle or exactness
#endif

    unsigned char lvImplicitlyReferenced : 1; // true if there are non-IR references to this local (prolog, epilog, gc,
                                              // eh)

    unsigned char lvSuppressedZeroInit : 1; // local needs zero init if we transform tail call to loop

    unsigned char lvHasExplicitInit : 1; // The local is explicitly initialized and doesn't need zero initialization in
                                         // the prolog. If the local has gc pointers, there are no gc-safe points
                                         // between the prolog and the explicit initialization.

    union {
        unsigned lvFieldLclStart; // The index of the local var representing the first field in the promoted struct
                                  // local. For implicit byref parameters, this gets hijacked between
                                  // fgRetypeImplicitByRefArgs and fgMarkDemotedImplicitByRefArgs to point to the
                                  // struct local created to model the parameter's struct promotion, if any.
        unsigned lvParentLcl; // The index of the local var representing the parent (i.e. the promoted struct local).
                              // Valid on promoted struct local fields.
    };

    unsigned char lvFieldCnt; //  Number of fields in the promoted VarDsc.
    unsigned char lvFldOffset;
    unsigned char lvFldOrdinal;

#ifdef DEBUG
    unsigned char lvSingleDefDisqualifyReason = 'H';
#endif

#if FEATURE_MULTIREG_ARGS
    regNumber lvRegNumForSlot(unsigned slotNum)
    {
        if (slotNum == 0)
        {
            return (regNumber)_lvArgReg;
        }
        else if (slotNum == 1)
        {
            return GetOtherArgReg();
        }
        else
        {
            assert(false && "Invalid slotNum!");
        }

        unreached();
    }
#endif // FEATURE_MULTIREG_ARGS

    CorInfoHFAElemType GetLvHfaElemKind() const
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        return _lvHfaElemKind;
#else
        NOWAY_MSG("GetLvHfaElemKind");
        return CORINFO_HFA_ELEM_NONE;
#endif // FEATURE_HFA_FIELDS_PRESENT
    }

    void SetLvHfaElemKind(CorInfoHFAElemType elemKind)
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        _lvHfaElemKind = elemKind;
#else
        NOWAY_MSG("SetLvHfaElemKind");
#endif // FEATURE_HFA_FIELDS_PRESENT
    }

    bool lvIsHfa() const
    {
        if (GlobalJitOptions::compFeatureHfa)
        {
            return IsHfa(GetLvHfaElemKind());
        }
        else
        {
            return false;
        }
    }

    bool lvIsHfaRegArg() const
    {
        if (GlobalJitOptions::compFeatureHfa)
        {
            return lvIsRegArg && lvIsHfa();
        }
        else
        {
            return false;
        }
    }

    //------------------------------------------------------------------------------
    // lvHfaSlots: Get the number of slots used by an HFA local
    //
    // Return Value:
    //    On Arm64 - Returns 1-4 indicating the number of register slots used by the HFA
    //    On Arm32 - Returns the total number of single FP register slots used by the HFA, max is 8
    //
    unsigned lvHfaSlots() const
    {
        assert(lvIsHfa());
        assert(varTypeIsStruct(lvType));
        unsigned slots = 0;
#ifdef TARGET_ARM
        slots = lvExactSize / sizeof(float);
        assert(slots <= 8);
#elif defined(TARGET_ARM64)
        switch (GetLvHfaElemKind())
        {
            case CORINFO_HFA_ELEM_NONE:
                assert(!"lvHfaSlots called for non-HFA");
                break;
            case CORINFO_HFA_ELEM_FLOAT:
                assert((lvExactSize % 4) == 0);
                slots = lvExactSize >> 2;
                break;
            case CORINFO_HFA_ELEM_DOUBLE:
            case CORINFO_HFA_ELEM_VECTOR64:
                assert((lvExactSize % 8) == 0);
                slots = lvExactSize >> 3;
                break;
            case CORINFO_HFA_ELEM_VECTOR128:
                assert((lvExactSize % 16) == 0);
                slots = lvExactSize >> 4;
                break;
            default:
                unreached();
        }
        assert(slots <= 4);
#endif // TARGET_ARM64
        return slots;
    }

    // lvIsMultiRegArgOrRet()
    //     returns true if this is a multireg LclVar struct used in an argument context
    //               or if this is a multireg LclVar struct assigned from a multireg call
    bool lvIsMultiRegArgOrRet()
    {
        return lvIsMultiRegArg || lvIsMultiRegRet;
    }

#if defined(DEBUG)
private:
    DoNotEnregisterReason m_doNotEnregReason;

    AddressExposedReason m_addrExposedReason;

public:
    void SetDoNotEnregReason(DoNotEnregisterReason reason)
    {
        m_doNotEnregReason = reason;
    }

    DoNotEnregisterReason GetDoNotEnregReason() const
    {
        return m_doNotEnregReason;
    }

    AddressExposedReason GetAddrExposedReason() const
    {
        return m_addrExposedReason;
    }
#endif // DEBUG

public:
    void SetAddressExposed(bool value DEBUGARG(AddressExposedReason reason))
    {
        m_addrExposed = value;
        INDEBUG(m_addrExposedReason = reason);
    }

    void CleanAddressExposed()
    {
        m_addrExposed = false;
    }

    bool IsAddressExposed() const
    {
        return m_addrExposed;
    }

private:
    regNumberSmall _lvRegNum; // Used to store the register this variable is in (or, the low register of a
                              // register pair). It is set during codegen any time the
                              // variable is enregistered (lvRegister is only set
                              // to non-zero if the variable gets the same register assignment for its entire
                              // lifetime).
#if !defined(TARGET_64BIT)
    regNumberSmall _lvOtherReg; // Used for "upper half" of long var.
#endif                          // !defined(TARGET_64BIT)

    regNumberSmall _lvArgReg; // The (first) register in which this argument is passed.

#if FEATURE_MULTIREG_ARGS
    regNumberSmall _lvOtherArgReg; // Used for the second part of the struct passed in a register.
                                   // Note this is defined but not used by ARM32
#endif                             // FEATURE_MULTIREG_ARGS

    regNumberSmall _lvArgInitReg; // the register into which the argument is moved at entry

public:
    // The register number is stored in a small format (8 bits), but the getters return and the setters take
    // a full-size (unsigned) format, to localize the casts here.

    /////////////////////

    regNumber GetRegNum() const
    {
        return (regNumber)_lvRegNum;
    }

    void SetRegNum(regNumber reg)
    {
        _lvRegNum = (regNumberSmall)reg;
        assert(_lvRegNum == reg);
    }

    /////////////////////

#if defined(TARGET_64BIT)

    regNumber GetOtherReg() const
    {
        assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
                                       // "unreachable code" warnings
        return REG_NA;
    }

    void SetOtherReg(regNumber reg)
    {
        assert(!"shouldn't get here"); // can't use "unreached();" because it's NORETURN, which causes C4072
                                       // "unreachable code" warnings
    }
#else  // !TARGET_64BIT

    regNumber GetOtherReg() const
    {
        return (regNumber)_lvOtherReg;
    }

    void SetOtherReg(regNumber reg)
    {
        _lvOtherReg = (regNumberSmall)reg;
        assert(_lvOtherReg == reg);
    }
#endif // !TARGET_64BIT

    /////////////////////

    regNumber GetArgReg() const
    {
        return (regNumber)_lvArgReg;
    }

    void SetArgReg(regNumber reg)
    {
        _lvArgReg = (regNumberSmall)reg;
        assert(_lvArgReg == reg);
    }

#if FEATURE_MULTIREG_ARGS

    regNumber GetOtherArgReg() const
    {
        return (regNumber)_lvOtherArgReg;
    }

    void SetOtherArgReg(regNumber reg)
    {
        _lvOtherArgReg = (regNumberSmall)reg;
        assert(_lvOtherArgReg == reg);
    }
#endif // FEATURE_MULTIREG_ARGS

#ifdef FEATURE_SIMD
    // Is this a SIMD struct?
    bool lvIsSIMDType() const
    {
        return lvSIMDType;
    }

    // Is this a SIMD struct which is used for a SIMD intrinsic?
    bool lvIsUsedInSIMDIntrinsic() const
    {
        return lvUsedInSIMDIntrinsic;
    }
#else
    // If feature_simd not enabled, return false
    bool lvIsSIMDType() const
    {
        return false;
    }
    bool lvIsUsedInSIMDIntrinsic() const
    {
        return false;
    }
#endif

    /////////////////////

    regNumber GetArgInitReg() const
    {
        return (regNumber)_lvArgInitReg;
    }

    void SetArgInitReg(regNumber reg)
    {
        _lvArgInitReg = (regNumberSmall)reg;
        assert(_lvArgInitReg == reg);
    }

    /////////////////////

    bool lvIsRegCandidate() const
    {
        return lvLRACandidate != 0;
    }

    bool lvIsInReg() const
    {
        return lvIsRegCandidate() && (GetRegNum() != REG_STK);
    }

    regMaskTP lvRegMask() const
    {
        regMaskTP regMask = RBM_NONE;
        if (varTypeUsesFloatReg(TypeGet()))
        {
            if (GetRegNum() != REG_STK)
            {
                regMask = genRegMaskFloat(GetRegNum(), TypeGet());
            }
        }
        else
        {
            if (GetRegNum() != REG_STK)
            {
                regMask = genRegMask(GetRegNum());
            }
        }
        return regMask;
    }

    unsigned short lvVarIndex; // variable tracking index

private:
    unsigned short m_lvRefCnt; // unweighted (real) reference count. For implicit by reference
                               // parameters, this gets hijacked from fgResetImplicitByRefRefCount
                               // through fgMarkDemotedImplicitByRefArgs, to provide a static
                               // appearance count (computed during address-exposed analysis)
                               // that fgMakeOutgoingStructArgCopy consults during global morph
                               // to determine if eliding its copy is legal.
    weight_t m_lvRefCntWtd; // weighted reference count

public:
    unsigned short lvRefCnt(RefCountState state = RCS_NORMAL) const;
    void incLvRefCnt(unsigned short delta, RefCountState state = RCS_NORMAL);
    void setLvRefCnt(unsigned short newValue, RefCountState state = RCS_NORMAL);

    weight_t lvRefCntWtd(RefCountState state = RCS_NORMAL) const;
    void incLvRefCntWtd(weight_t delta, RefCountState state = RCS_NORMAL);
    void setLvRefCntWtd(weight_t newValue, RefCountState state = RCS_NORMAL);

private:
    int lvStkOffs; // stack offset of home in bytes.

public:
    int GetStackOffset() const
    {
        return lvStkOffs;
    }

    void SetStackOffset(int offset)
    {
        lvStkOffs = offset;
    }

    unsigned lvExactSize; // (exact) size of the type in bytes

    // Is this a promoted struct?
    // This method returns true only for structs (including SIMD structs), not for
    // locals that are split on a 32-bit target.
    // It is only necessary to use this:
    //   1) if only structs are wanted, and
    //   2) if Lowering has already been done.
    // Otherwise lvPromoted is valid.
    bool lvPromotedStruct()
    {
#if !defined(TARGET_64BIT)
        return (lvPromoted && !varTypeIsLong(lvType));
#else  // defined(TARGET_64BIT)
        return lvPromoted;
#endif // defined(TARGET_64BIT)
    }

    unsigned lvSize() const;

    size_t lvArgStackSize() const;

    unsigned lvSlotNum; // original slot # (if remapped)

    typeInfo lvVerTypeInfo; // type info needed for verification

    // class handle for the local or null if not known or not a class,
    // for a struct handle use `GetStructHnd()`.
    CORINFO_CLASS_HANDLE lvClassHnd;

    // Get class handle for a struct local or implicitByRef struct local.
    CORINFO_CLASS_HANDLE GetStructHnd() const
    {
#ifdef FEATURE_SIMD
        if (lvSIMDType && (m_layout == nullptr))
        {
            return NO_CLASS_HANDLE;
        }
#endif

        assert(m_layout != nullptr);
#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
        assert(varTypeIsStruct(TypeGet()) || (lvIsImplicitByRef && (TypeGet() == TYP_BYREF)));
#else
        assert(varTypeIsStruct(TypeGet()));
#endif
        CORINFO_CLASS_HANDLE structHnd = m_layout->GetClassHandle();
        assert(structHnd != NO_CLASS_HANDLE);
        return structHnd;
    }

    CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields

private:
    ClassLayout* m_layout; // layout info for structs

public:
    BlockSet   lvRefBlks;          // Set of blocks that contain refs
    Statement* lvDefStmt;          // Pointer to the statement with the single definition
    void       lvaDisqualifyVar(); // Call to disqualify a local variable from use in optAddCopies

    var_types TypeGet() const
    {
        return (var_types)lvType;
    }

    bool lvStackAligned() const
    {
        assert(lvIsStructField);
        return ((lvFldOffset % TARGET_POINTER_SIZE) == 0);
    }

    bool lvNormalizeOnLoad() const
    {
        return varTypeIsSmall(TypeGet()) &&
               // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
               (lvIsParam || m_addrExposed || lvIsStructField);
    }

    bool lvNormalizeOnStore() const
    {
        return varTypeIsSmall(TypeGet()) &&
               // lvIsStructField is treated the same as the aliased local, see fgDoNormalizeOnStore.
               !(lvIsParam || m_addrExposed || lvIsStructField);
    }

    void incRefCnts(weight_t weight, Compiler* pComp, RefCountState state = RCS_NORMAL, bool propagate = true);

    var_types GetHfaType() const
    {
        if (GlobalJitOptions::compFeatureHfa)
        {
            assert(lvIsHfa());
            return HfaTypeFromElemKind(GetLvHfaElemKind());
        }
        else
        {
            return TYP_UNDEF;
        }
    }

    void SetHfaType(var_types type)
    {
        if (GlobalJitOptions::compFeatureHfa)
        {
            CorInfoHFAElemType elemKind = HfaElemKindFromType(type);
            SetLvHfaElemKind(elemKind);
            // Ensure we've allocated enough bits.
            assert(GetLvHfaElemKind() == elemKind);
        }
    }

    // Returns true if this variable contains GC pointers (including being a GC pointer itself).
    bool HasGCPtr() const
    {
        return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr());
    }

    // Returns the layout of a struct variable.
    ClassLayout* GetLayout() const
    {
        assert(varTypeIsStruct(lvType));
        return m_layout;
    }

    // Sets the layout of a struct variable.
    void SetLayout(ClassLayout* layout)
    {
        assert(varTypeIsStruct(lvType));
        assert((m_layout == nullptr) || ClassLayout::AreCompatible(m_layout, layout));
        m_layout = layout;
    }

    SsaDefArray<LclSsaVarDsc> lvPerSsaData;

    // Returns the address of the per-Ssa data for the given ssaNum (which is required
    // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is
    // not an SSA variable).
    LclSsaVarDsc* GetPerSsaData(unsigned ssaNum)
    {
        return lvPerSsaData.GetSsaDef(ssaNum);
    }

    // Returns the SSA number for "ssaDef". Requires "ssaDef" to be a valid definition
    // of this variable.
    unsigned GetSsaNumForSsaDef(LclSsaVarDsc* ssaDef)
    {
        return lvPerSsaData.GetSsaNum(ssaDef);
    }

    var_types GetRegisterType(const GenTreeLclVarCommon* tree) const;

    var_types GetRegisterType() const;

    var_types GetActualRegisterType() const;

    bool IsEnregisterableType() const
    {
        return GetRegisterType() != TYP_UNDEF;
    }

    bool IsEnregisterableLcl() const
    {
        if (lvDoNotEnregister)
        {
            return false;
        }
        return IsEnregisterableType();
    }

    //-----------------------------------------------------------------------------
    //  IsAlwaysAliveInMemory: Determines if this variable's value is always
    //     up-to-date on stack. This is possible if this is an EH-var or
    //     we decided to spill after single-def.
    //
    bool IsAlwaysAliveInMemory() const
    {
        return lvLiveInOutOfHndlr || lvSpillAtSingleDef;
    }

    bool CanBeReplacedWithItsField(Compiler* comp) const;

#ifdef DEBUG
public:
    const char* lvReason;

    void PrintVarReg() const
    {
        printf("%s", getRegName(GetRegNum()));
    }
#endif // DEBUG

}; // class LclVarDsc

enum class SymbolicIntegerValue : int32_t
{
    LongMin,
    IntMin,
    ShortMin,
    ByteMin,
    Zero,
    One,
    ByteMax,
    UByteMax,
    ShortMax,
    UShortMax,
    IntMax,
    UIntMax,
    LongMax,
};

inline constexpr bool operator>(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
    return static_cast<int32_t>(left) > static_cast<int32_t>(right);
}

inline constexpr bool operator>=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
    return static_cast<int32_t>(left) >= static_cast<int32_t>(right);
}

inline constexpr bool operator<(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
    return static_cast<int32_t>(left) < static_cast<int32_t>(right);
}

inline constexpr bool operator<=(SymbolicIntegerValue left, SymbolicIntegerValue right)
{
    return static_cast<int32_t>(left) <= static_cast<int32_t>(right);
}

// Represents an integral range useful for reasoning about integral casts.
// It uses a symbolic representation for lower and upper bounds so
// that it can efficiently handle integers of all sizes on all hosts.
//
// Note that the ranges represented by this class are **always** in the
// "signed" domain. This is so that if we know the range a node produces, it
// can be trivially used to determine if a cast above the node does or does not
// overflow, which requires that the interpretation of integers be the same both
// for the "input" and "output". We choose signed interpretation here because it
// produces nice continuous ranges and because IR uses sign-extension for constants.
//
// Some examples of how ranges are computed for casts:
// 1. CAST_OVF(ubyte <- uint): does not overflow for [0..UBYTE_MAX], produces the
//    same range - all casts that do not change the representation, i. e. have the same
//    "actual" input and output type, have the same "input" and "output" range.
// 2. CAST_OVF(ulong <- uint): never overflows => the "input" range is [INT_MIN..INT_MAX]
//    (aka all possible 32 bit integers). Produces [0..UINT_MAX] (aka all possible 32
//    bit integers zero-extended to 64 bits).
// 3. CAST_OVF(int <- uint): overflows for inputs larger than INT_MAX <=> less than 0
//    when interpreting as signed => the "input" range is [0..INT_MAX], the same range
//    being the produced one as the node does not change the width of the integer.
//
class IntegralRange
{
private:
    SymbolicIntegerValue m_lowerBound;
    SymbolicIntegerValue m_upperBound;

public:
    IntegralRange() = default;

    IntegralRange(SymbolicIntegerValue lowerBound, SymbolicIntegerValue upperBound)
        : m_lowerBound(lowerBound), m_upperBound(upperBound)
    {
        assert(lowerBound <= upperBound);
    }

    bool Contains(int64_t value) const;

    bool Contains(IntegralRange other) const
    {
        return (m_lowerBound <= other.m_lowerBound) && (other.m_upperBound <= m_upperBound);
    }

    bool IsPositive()
    {
        return m_lowerBound >= SymbolicIntegerValue::Zero;
    }

    bool Equals(IntegralRange other) const
    {
        return (m_lowerBound == other.m_lowerBound) && (m_upperBound == other.m_upperBound);
    }

    static int64_t SymbolicToRealValue(SymbolicIntegerValue value);
    static SymbolicIntegerValue LowerBoundForType(var_types type);
    static SymbolicIntegerValue UpperBoundForType(var_types type);

    static IntegralRange ForType(var_types type)
    {
        return {LowerBoundForType(type), UpperBoundForType(type)};
    }

    static IntegralRange ForNode(GenTree* node, Compiler* compiler);
    static IntegralRange ForCastInput(GenTreeCast* cast);
    static IntegralRange ForCastOutput(GenTreeCast* cast);

#ifdef DEBUG
    static void Print(IntegralRange range);
#endif // DEBUG
};
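// For illustration only (a sketch, not a definitive recipe from this file): a
// client reasoning about cast overflow might combine the helpers above like so,
// for some GenTreeCast* `cast` and a Compiler* `compiler` in hand:
//
//     IntegralRange operandRange = IntegralRange::ForNode(cast->CastOp(), compiler);
//     if (IntegralRange::ForCastInput(cast).Contains(operandRange))
//     {
//         // Every value the operand can produce is consumed without overflow,
//         // so a checked cast here can never throw.
//     }
//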
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           TempsInfo                                       XX
XX                                                                           XX
XX  The temporary lclVars allocated by the compiler for code generation      XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

/*****************************************************************************
 *
 *  The following keeps track of temporaries allocated in the stack frame
 *  during code-generation (after register allocation). These spill-temps are
 *  only used if we run out of registers while evaluating a tree.
 *
 *  These are different from the more common temps allocated by lvaGrabTemp().
 */

class TempDsc
{
public:
    TempDsc* tdNext;

private:
    int tdOffs;
#ifdef DEBUG
    static const int BAD_TEMP_OFFSET = 0xDDDDDDDD; // used as a sentinel "bad value" for tdOffs in DEBUG
#endif                                             // DEBUG

    int       tdNum;
    BYTE      tdSize;
    var_types tdType;

public:
    TempDsc(int _tdNum, unsigned _tdSize, var_types _tdType) : tdNum(_tdNum), tdSize((BYTE)_tdSize), tdType(_tdType)
    {
#ifdef DEBUG
        // temps must have a negative number (so they have a different number from all local variables)
        assert(tdNum < 0);
        tdOffs = BAD_TEMP_OFFSET;
#endif // DEBUG
        if (tdNum != _tdNum)
        {
            IMPL_LIMITATION("too many spill temps");
        }
    }

#ifdef DEBUG
    bool tdLegalOffset() const
    {
        return tdOffs != BAD_TEMP_OFFSET;
    }
#endif // DEBUG

    int tdTempOffs() const
    {
        assert(tdLegalOffset());
        return tdOffs;
    }
    void tdSetTempOffs(int offs)
    {
        tdOffs = offs;
        assert(tdLegalOffset());
    }
    void tdAdjustTempOffs(int offs)
    {
        tdOffs += offs;
        assert(tdLegalOffset());
    }

    int tdTempNum() const
    {
        assert(tdNum < 0);
        return tdNum;
    }
    unsigned tdTempSize() const
    {
        return tdSize;
    }
    var_types tdTempType() const
    {
        return tdType;
    }
};

// interface to hide linearscan implementation from rest of compiler
class LinearScanInterface
{
public:
    virtual void doLinearScan()                                = 0;
    virtual void recordVarLocationsAtStartOfBB(BasicBlock* bb) = 0;
    virtual bool willEnregisterLocalVars() const               = 0;
#if TRACK_LSRA_STATS
    virtual void dumpLsraStatsCsv(FILE* file)     = 0;
    virtual void dumpLsraStatsSummary(FILE* file) = 0;
#endif // TRACK_LSRA_STATS
};

LinearScanInterface* getLinearScanAllocator(Compiler* comp);

// Information about arrays: their element type and size, and the offset of the first element.
// We label GT_IND's that are array indices with GTF_IND_ARR_INDEX, and, for such nodes,
// associate an array info via the map retrieved by GetArrayInfoMap(). This information is used,
// for example, in value numbering of array index expressions.
struct ArrayInfo
{
    var_types            m_elemType;
    CORINFO_CLASS_HANDLE m_elemStructType;
    unsigned             m_elemSize;
    unsigned             m_elemOffset;

    ArrayInfo() : m_elemType(TYP_UNDEF), m_elemStructType(nullptr), m_elemSize(0), m_elemOffset(0)
    {
    }

    ArrayInfo(var_types elemType, unsigned elemSize, unsigned elemOffset, CORINFO_CLASS_HANDLE elemStructType)
        : m_elemType(elemType), m_elemStructType(elemStructType), m_elemSize(elemSize), m_elemOffset(elemOffset)
    {
    }
};

// This enumeration names the phases into which we divide compilation. The phases should completely
// partition a compilation.
enum Phases
{
#define CompPhaseNameMacro(enum_nm, string_nm, short_nm, hasChildren, parent, measureIR) enum_nm,
#include "compphases.h"
    PHASE_NUMBER_OF
};

extern const char*   PhaseNames[];
extern const char*   PhaseEnums[];
extern const LPCWSTR PhaseShortNames[];

// Specify which checks should be run after each phase
//
enum class PhaseChecks
{
    CHECK_NONE,
    CHECK_ALL
};

// Specify compiler data that a phase might modify
enum class PhaseStatus : unsigned
{
    MODIFIED_NOTHING,
    MODIFIED_EVERYTHING
};

// The following enum provides a simple 1:1 mapping to CLR API's
enum API_ICorJitInfo_Names
{
#define DEF_CLR_API(name) API_##name,
#include "ICorJitInfo_API_names.h"
    API_COUNT
};

//---------------------------------------------------------------
// Compilation time.
//

// A "CompTimeInfo" is a structure for tracking the compilation time of one or more methods.
// We divide a compilation into a sequence of contiguous phases, and track the total (per-thread) cycles
// of the compilation, as well as the cycles for each phase. We also track the number of bytecodes.
// If there is a failure in reading a timer at any point, the "CompTimeInfo" becomes invalid, as indicated
// by "m_timerFailure" being true.
// If FEATURE_JIT_METHOD_PERF is not set, we define a minimal form of this, enough to let other code compile.
struct CompTimeInfo
{
#ifdef FEATURE_JIT_METHOD_PERF
    // The string names of the phases.
    static const char* PhaseNames[];

    static bool PhaseHasChildren[];
    static int  PhaseParent[];
    static bool PhaseReportsIRSize[];

    unsigned         m_byteCodeBytes;
    unsigned __int64 m_totalCycles;
    unsigned __int64 m_invokesByPhase[PHASE_NUMBER_OF];
    unsigned __int64 m_cyclesByPhase[PHASE_NUMBER_OF];
#if MEASURE_CLRAPI_CALLS
    unsigned __int64 m_CLRinvokesByPhase[PHASE_NUMBER_OF];
    unsigned __int64 m_CLRcyclesByPhase[PHASE_NUMBER_OF];
#endif

    unsigned m_nodeCountAfterPhase[PHASE_NUMBER_OF];

    // For better documentation, we call EndPhase on
    // non-leaf phases. We should also call EndPhase on the
    // last leaf subphase; obviously, the elapsed cycles between the EndPhase
    // for the last leaf subphase and the EndPhase for an ancestor should be very small.
    // We add all such "redundant end phase" intervals to this variable below; we print
    // it out in a report, so we can verify that it is, indeed, very small. If it ever
    // isn't, this means that we're doing something significant between the end of the last
    // declared subphase and the end of its parent.
    unsigned __int64 m_parentPhaseEndSlop;
    bool             m_timerFailure;

#if MEASURE_CLRAPI_CALLS
    // The following measures the time spent inside each individual CLR API call.
    unsigned         m_allClrAPIcalls;
    unsigned         m_perClrAPIcalls[API_ICorJitInfo_Names::API_COUNT];
    unsigned __int64 m_allClrAPIcycles;
    unsigned __int64 m_perClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
    unsigned __int32 m_maxClrAPIcycles[API_ICorJitInfo_Names::API_COUNT];
#endif // MEASURE_CLRAPI_CALLS

    CompTimeInfo(unsigned byteCodeBytes);
#endif
};

#ifdef FEATURE_JIT_METHOD_PERF

#if MEASURE_CLRAPI_CALLS
struct WrapICorJitInfo;
#endif

// This class summarizes the JIT time information over the course of a run: the number of methods compiled,
// and the total and maximum timings. (These are instances of the "CompTimeInfo" type described above).
// The operation of adding a single method's timing to the summary may be performed concurrently by several
// threads, so it is protected by a lock.
// This class is intended to be used as a singleton type, with only a single instance.
class CompTimeSummaryInfo
{
    // This lock protects the fields of all CompTimeSummaryInfo(s) (of which we expect there to be one).
    static CritSecObject s_compTimeSummaryLock;

    int          m_numMethods;
    int          m_totMethods;
    CompTimeInfo m_total;
    CompTimeInfo m_maximum;

    int          m_numFilteredMethods;
    CompTimeInfo m_filtered;

    // This can use whatever data you want to determine if the value to be added
    // belongs in the filtered section (it's always included in the unfiltered section)
    bool IncludedInFilteredData(CompTimeInfo& info);

public:
    // This is the unique CompTimeSummaryInfo object for this instance of the runtime.
    static CompTimeSummaryInfo s_compTimeSummary;

    CompTimeSummaryInfo()
        : m_numMethods(0), m_totMethods(0), m_total(0), m_maximum(0), m_numFilteredMethods(0), m_filtered(0)
    {
    }

    // Assumes that "info" is a completed CompTimeInfo for a compilation; adds it to the summary.
    // This is thread safe.
    void AddInfo(CompTimeInfo& info, bool includePhases);

    // Print the summary information to "f".
    // This is not thread-safe; assumed to be called by only one thread.
    void Print(FILE* f);
};

// A JitTimer encapsulates a CompTimeInfo for a single compilation. It also tracks the start of compilation,
// and when the current phase started. This is intended to be part of a Compilation object.
//
class JitTimer
{
    unsigned __int64 m_start;         // Start of the compilation.
    unsigned __int64 m_curPhaseStart; // Start of the current phase.
#if MEASURE_CLRAPI_CALLS
    unsigned __int64 m_CLRcallStart;   // Start of the current CLR API call (if any).
    unsigned __int64 m_CLRcallInvokes; // CLR API invokes under current outer so far
    unsigned __int64 m_CLRcallCycles;  // CLR API cycles under current outer so far.
    int              m_CLRcallAPInum;  // The enum/index of the current CLR API call (or -1).
    static double    s_cyclesPerSec;   // Cached for speedier measurements
#endif
#ifdef DEBUG
    Phases m_lastPhase; // The last phase that was completed (or (Phases)-1 to start).
#endif
    CompTimeInfo m_info; // The CompTimeInfo for this compilation.

    static CritSecObject s_csvLock; // Lock to protect the time log file.
    static FILE*         s_csvFile; // The time log file handle.

    void PrintCsvMethodStats(Compiler* comp);

private:
    void* operator new(size_t);
    void* operator new[](size_t);
    void operator delete(void*);
    void operator delete[](void*);

public:
    // Initializes the timer instance
    JitTimer(unsigned byteCodeSize);

    static JitTimer* Create(Compiler* comp, unsigned byteCodeSize)
    {
        return ::new (comp, CMK_Unknown) JitTimer(byteCodeSize);
    }

    static void PrintCsvHeader();

    // Ends the current phase (argument is for a redundant check).
    void EndPhase(Compiler* compiler, Phases phase);

#if MEASURE_CLRAPI_CALLS
    // Start and end a timed CLR API call.
    void CLRApiCallEnter(unsigned apix);
    void CLRApiCallLeave(unsigned apix);
#endif // MEASURE_CLRAPI_CALLS

    // Completes the timing of the current method, which is assumed to have "byteCodeBytes" bytes of bytecode,
    // and adds it to "sum".
    void Terminate(Compiler* comp, CompTimeSummaryInfo& sum, bool includePhases);

    // Attempts to query the cycle counter of the current thread. If successful, returns "true" and sets
    // *cycles to the cycle counter value. Otherwise, returns false and sets the "m_timerFailure" flag of
    // "m_info" to true.
    bool GetThreadCycles(unsigned __int64* cycles)
    {
        bool res = CycleTimer::GetThreadCyclesS(cycles);
        if (!res)
        {
            m_info.m_timerFailure = true;
        }
        return res;
    }

    static void Shutdown();
};
#endif // FEATURE_JIT_METHOD_PERF

//------------------- Function/Funclet info -------------------------------
enum FuncKind : BYTE
{
    FUNC_ROOT,    // The main/root function (always id==0)
    FUNC_HANDLER, // a funclet associated with an EH handler (finally, fault, catch, filter handler)
    FUNC_FILTER,  // a funclet associated with an EH filter
    FUNC_COUNT
};

class emitLocation;

struct FuncInfoDsc
{
    FuncKind       funKind;
    BYTE           funFlags;   // Currently unused, just here for padding
    unsigned short funEHIndex; // index, into the ebd table, of innermost EH clause corresponding to this
                               // funclet. It is only valid if funKind field indicates this is an
                               // EH-related funclet: FUNC_HANDLER or FUNC_FILTER

#if defined(TARGET_AMD64)

    // TODO-AMD64-Throughput: make the AMD64 info more like the ARM info to avoid having this large static array.
    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;
    UNWIND_INFO   unwindHeader;
    // Maximum of 255 UNWIND_CODE 'nodes' and then the unwind header. If there are an odd
    // number of codes, the VM or Zapper will 4-byte align the whole thing.
    BYTE     unwindCodes[offsetof(UNWIND_INFO, UnwindCode) + (0xFF * sizeof(UNWIND_CODE))];
    unsigned unwindCodeSlot;

#elif defined(TARGET_X86)

    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;

#elif defined(TARGET_ARMARCH)

    UnwindInfo  uwi;     // Unwind information for this function/funclet's hot section
    UnwindInfo* uwiCold; // Unwind information for this function/funclet's cold section
                         //   Note: we only have a pointer here instead of the actual object,
                         //   to save memory in the JIT case (compared to the NGEN case),
                         //   where we don't have any cold section.
                         //   Note 2: we currently don't support hot/cold splitting in functions
                         //   with EH, so uwiCold will be NULL for all funclets.

    emitLocation* startLoc;
    emitLocation* endLoc;
    emitLocation* coldStartLoc; // locations for the cold section, if there is one.
    emitLocation* coldEndLoc;

#endif // TARGET_ARMARCH

#if defined(FEATURE_CFI_SUPPORT)
    jitstd::vector<CFI_CODE>* cfiCodes;
#endif // FEATURE_CFI_SUPPORT

    // Eventually we may want to move rsModifiedRegsMask, lvaOutgoingArgSize, and anything else
    // that isn't shared between the main function body and funclets.
};

struct fgArgTabEntry
{
    GenTreeCall::Use* use;     // Points to the argument's GenTreeCall::Use in gtCallArgs or gtCallThisArg.
    GenTreeCall::Use* lateUse; // Points to the argument's GenTreeCall::Use in gtCallLateArgs, if any.

    // Get the node that corresponds to this argument entry.
    // This is the "real" node and not a placeholder or setup node.
    GenTree* GetNode() const
    {
        return lateUse == nullptr ? use->GetNode() : lateUse->GetNode();
    }

    unsigned argNum; // The original argument number, also specifies the required argument evaluation order from the IL

private:
    regNumberSmall regNums[MAX_ARG_REG_COUNT]; // The registers to use when passing this argument, set to REG_STK for
                                               // arguments passed on the stack

public:
    unsigned numRegs; // Count of number of registers that this argument uses.
                      // Note that on ARM, if we have a double hfa, this reflects the number
                      // of DOUBLE registers.

#if defined(UNIX_AMD64_ABI)
    // Unix amd64 will split floating point types and integer types in structs
    // between floating point and general purpose registers. Keep track of that
    // information so we do not need to recompute it later.
    unsigned structIntRegs;
    unsigned structFloatRegs;
#endif // UNIX_AMD64_ABI

#if defined(DEBUG_ARG_SLOTS)
    // These fields were used to calculate stack size in stack slots for arguments
    // but now they are replaced by precise `m_byteOffset/m_byteSize` because of
    // arm64 apple abi requirements.

    // A slot is a pointer sized region in the OutArg area.
    unsigned slotNum;  // When an argument is passed in the OutArg area this is the slot number in the OutArg area
    unsigned numSlots; // Count of number of slots that this argument uses
#endif // DEBUG_ARG_SLOTS

    // Return number of stack slots that this argument is taking.
    // TODO-Cleanup: this function does not align with arm64 apple model,
    // delete it. In most cases we just want to know if it is using the stack or not,
    // but in some cases we are checking if it is a multireg arg, like:
    // `numRegs + GetStackSlotsNumber() > 1`, which is harder to replace.
    //
    unsigned GetStackSlotsNumber() const
    {
        return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
    }

private:
    unsigned _lateArgInx; // index into gtCallLateArgs list; UINT_MAX if this is not a late arg.
public:
    unsigned tmpNum; // the LclVar number if we had to force evaluation of this arg

    var_types argType; // The type used to pass this argument. This is generally the original argument type, but when a
                       // struct is passed as a scalar type, this is that type.
                       // Note that if a struct is passed by reference, this will still be the struct type.

    bool needTmp : 1;      // True when we force this argument's evaluation into a temp LclVar
    bool needPlace : 1;    // True when we must replace this argument with a placeholder node
    bool isTmp : 1;        // True when we set up a temp LclVar for this argument due to size issues with the struct
    bool processed : 1;    // True when we have decided the evaluation order for this argument in the gtCallLateArgs
    bool isBackFilled : 1; // True when the argument fills a register slot skipped due to alignment requirements of
                           // previous arguments.

    NonStandardArgKind nonStandardArgKind : 4; // The non-standard arg kind. Non-standard args are args that are forced
                                               // to be in certain registers or on the stack, regardless of where they
                                               // appear in the arg list.

    bool isStruct : 1;    // True if this is a struct arg
    bool _isVararg : 1;   // True if the argument is in a vararg context.
    bool passedByRef : 1; // True iff the argument is passed by reference.

#if FEATURE_ARG_SPLIT
    bool _isSplit : 1; // True when this argument is split between the registers and OutArg area
#endif                 // FEATURE_ARG_SPLIT

#ifdef FEATURE_HFA_FIELDS_PRESENT
    CorInfoHFAElemType _hfaElemKind : 3; // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
#endif

    CorInfoHFAElemType GetHfaElemKind() const
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        return _hfaElemKind;
#else
        NOWAY_MSG("GetHfaElemKind");
        return CORINFO_HFA_ELEM_NONE;
#endif
    }

    void SetHfaElemKind(CorInfoHFAElemType elemKind)
    {
#ifdef FEATURE_HFA_FIELDS_PRESENT
        _hfaElemKind = elemKind;
#else
        NOWAY_MSG("SetHfaElemKind");
#endif
    }

    bool isNonStandard() const
    {
        return nonStandardArgKind != NonStandardArgKind::None;
    }

    // Returns true if the IR node for this non-standard arg is added by fgInitArgInfo.
    // In this case, it must be removed by GenTreeCall::ResetArgInfo.
bool isNonStandardArgAddedLate() const { switch (static_cast<NonStandardArgKind>(nonStandardArgKind)) { case NonStandardArgKind::None: case NonStandardArgKind::PInvokeFrame: case NonStandardArgKind::ShiftLow: case NonStandardArgKind::ShiftHigh: case NonStandardArgKind::FixedRetBuffer: case NonStandardArgKind::ValidateIndirectCallTarget: return false; case NonStandardArgKind::WrapperDelegateCell: case NonStandardArgKind::VirtualStubCell: case NonStandardArgKind::PInvokeCookie: case NonStandardArgKind::PInvokeTarget: case NonStandardArgKind::R2RIndirectionCell: return true; default: unreached(); } } bool isLateArg() const { bool isLate = (_lateArgInx != UINT_MAX); return isLate; } unsigned GetLateArgInx() const { assert(isLateArg()); return _lateArgInx; } void SetLateArgInx(unsigned inx) { _lateArgInx = inx; } regNumber GetRegNum() const { return (regNumber)regNums[0]; } regNumber GetOtherRegNum() const { return (regNumber)regNums[1]; } #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif void setRegNum(unsigned int i, regNumber regNum) { assert(i < MAX_ARG_REG_COUNT); regNums[i] = (regNumberSmall)regNum; } regNumber GetRegNum(unsigned int i) { assert(i < MAX_ARG_REG_COUNT); return (regNumber)regNums[i]; } bool IsSplit() const { #if FEATURE_ARG_SPLIT return compFeatureArgSplit() && _isSplit; #else // FEATURE_ARG_SPLIT return false; #endif } void SetSplit(bool value) { #if FEATURE_ARG_SPLIT _isSplit = value; #endif } bool IsVararg() const { return compFeatureVarArg() && _isVararg; } void SetIsVararg(bool value) { if (compFeatureVarArg()) { _isVararg = value; } } bool IsHfaArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()); } else { return false; } } bool IsHfaRegArg() const { if (GlobalJitOptions::compFeatureHfa) { return IsHfa(GetHfaElemKind()) && isPassedInRegisters(); } else { return false; } } unsigned intRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structIntRegs; } #endif // defined(UNIX_AMD64_ABI) if (!this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } unsigned floatRegCount() const { #if defined(UNIX_AMD64_ABI) if (this->isStruct) { return this->structFloatRegs; } #endif // defined(UNIX_AMD64_ABI) if (this->isPassedInFloatRegisters()) { return this->numRegs; } return 0; } // Get the number of bytes that this argument is occupying on the stack, // including padding up to the target pointer size for platforms // where a stack argument can't take less. unsigned GetStackByteSize() const { if (!IsSplit() && numRegs > 0) { return 0; } assert(!IsHfaArg() || !IsSplit()); assert(GetByteSize() > TARGET_POINTER_SIZE * numRegs); const unsigned stackByteSize = GetByteSize() - TARGET_POINTER_SIZE * numRegs; return stackByteSize; } var_types GetHfaType() const { if (GlobalJitOptions::compFeatureHfa) { return HfaTypeFromElemKind(GetHfaElemKind()); } else { return TYP_UNDEF; } } void SetHfaType(var_types type, unsigned hfaSlots) { if (GlobalJitOptions::compFeatureHfa) { if (type != TYP_UNDEF) { // We must already have set the passing mode. assert(numRegs != 0 || GetStackByteSize() != 0); // We originally set numRegs according to the size of the struct, but if the size of the // hfaType is not the same as the pointer size, we need to correct it. // Note that hfaSlots is the number of registers we will use. For ARM, that is twice // the number of "double registers". 
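                // For example (illustrative): an HFA of four doubles on ARM arrives here with
                // hfaSlots == 8 register slots, and the halving below yields numHfaRegs == 4
                // DOUBLE registers.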
unsigned numHfaRegs = hfaSlots; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Must be an even number of registers. assert((numRegs & 1) == 0); numHfaRegs = hfaSlots / 2; } #endif // TARGET_ARM if (!IsHfaArg()) { // We haven't previously set this; do so now. CorInfoHFAElemType elemKind = HfaElemKindFromType(type); SetHfaElemKind(elemKind); // Ensure we've allocated enough bits. assert(GetHfaElemKind() == elemKind); if (isPassedInRegisters()) { numRegs = numHfaRegs; } } else { // We've already set this; ensure that it's consistent. if (isPassedInRegisters()) { assert(numRegs == numHfaRegs); } assert(type == HfaTypeFromElemKind(GetHfaElemKind())); } } } } #ifdef TARGET_ARM void SetIsBackFilled(bool backFilled) { isBackFilled = backFilled; } bool IsBackFilled() const { return isBackFilled; } #else // !TARGET_ARM void SetIsBackFilled(bool backFilled) { } bool IsBackFilled() const { return false; } #endif // !TARGET_ARM bool isPassedInRegisters() const { return !IsSplit() && (numRegs != 0); } bool isPassedInFloatRegisters() const { #ifdef TARGET_X86 return false; #else return isValidFloatArgReg(GetRegNum()); #endif } // Can we replace the struct type of this node with a primitive type for argument passing? bool TryPassAsPrimitive() const { return !IsSplit() && ((numRegs == 1) || (m_byteSize <= TARGET_POINTER_SIZE)); } #if defined(DEBUG_ARG_SLOTS) // Returns the number of "slots" used, where for this purpose a // register counts as a slot. unsigned getSlotCount() const { if (isBackFilled) { assert(isPassedInRegisters()); assert(numRegs == 1); } else if (GetRegNum() == REG_STK) { assert(!isPassedInRegisters()); assert(numRegs == 0); } else { assert(numRegs > 0); } return numSlots + numRegs; } #endif #if defined(DEBUG_ARG_SLOTS) // Returns the size as a multiple of pointer-size. // For targets without HFAs, this is the same as getSlotCount(). unsigned getSize() const { unsigned size = getSlotCount(); if (GlobalJitOptions::compFeatureHfa) { if (IsHfaRegArg()) { #ifdef TARGET_ARM // We counted the number of regs, but if they are DOUBLE hfa regs we have to double the size. if (GetHfaType() == TYP_DOUBLE) { assert(!IsSplit()); size <<= 1; } #elif defined(TARGET_ARM64) // We counted the number of regs, but if they are FLOAT hfa regs we have to halve the size, // or if they are SIMD16 vector hfa regs we have to double the size. if (GetHfaType() == TYP_FLOAT) { // Round up in case of odd HFA count. size = (size + 1) >> 1; } #ifdef FEATURE_SIMD else if (GetHfaType() == TYP_SIMD16) { size <<= 1; } #endif // FEATURE_SIMD #endif // TARGET_ARM64 } } return size; } #endif // DEBUG_ARG_SLOTS private: unsigned m_byteOffset; // byte size that this argument takes including the padding after. // For example, 1-byte arg on x64 with 8-byte alignment // will have `m_byteSize == 8`, the same arg on apple arm64 will have `m_byteSize == 1`. unsigned m_byteSize; unsigned m_byteAlignment; // usually 4 or 8 bytes (slots/registers). public: void SetByteOffset(unsigned byteOffset) { DEBUG_ARG_SLOTS_ASSERT(byteOffset / TARGET_POINTER_SIZE == slotNum); m_byteOffset = byteOffset; } unsigned GetByteOffset() const { DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == slotNum); return m_byteOffset; } void SetByteSize(unsigned byteSize, bool isStruct, bool isFloatHfa) { unsigned roundedByteSize; if (compMacOsArm64Abi()) { // Only struct types need extension or rounding to pointer size, but HFA<float> does not. 
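            // Illustrative example (cf. the m_byteSize comment above): a 1-byte scalar arg
            // is rounded up to m_byteSize == 8 on x64, while on Apple arm64 it keeps
            // m_byteSize == 1, since only structs (and not HFA<float>) are rounded there.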
if (isStruct && !isFloatHfa) { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } else { roundedByteSize = byteSize; } } else { roundedByteSize = roundUp(byteSize, TARGET_POINTER_SIZE); } #if !defined(TARGET_ARM) // Arm32 could have a struct with 8 byte alignment // which rounded size % 8 is not 0. assert(m_byteAlignment != 0); assert(roundedByteSize % m_byteAlignment == 0); #endif // TARGET_ARM #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi() && !isStruct) { assert(roundedByteSize == getSlotCount() * TARGET_POINTER_SIZE); } #endif m_byteSize = roundedByteSize; } unsigned GetByteSize() const { return m_byteSize; } void SetByteAlignment(unsigned byteAlignment) { m_byteAlignment = byteAlignment; } unsigned GetByteAlignment() const { return m_byteAlignment; } // Set the register numbers for a multireg argument. // There's nothing to do on x64/Ux because the structDesc has already been used to set the // register numbers. void SetMultiRegNums() { #if FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) if (numRegs == 1) { return; } regNumber argReg = GetRegNum(0); #ifdef TARGET_ARM unsigned int regSize = (GetHfaType() == TYP_DOUBLE) ? 2 : 1; #else unsigned int regSize = 1; #endif if (numRegs > MAX_ARG_REG_COUNT) NO_WAY("Multireg argument exceeds the maximum length"); for (unsigned int regIndex = 1; regIndex < numRegs; regIndex++) { argReg = (regNumber)(argReg + regSize); setRegNum(regIndex, argReg); } #endif // FEATURE_MULTIREG_ARGS && !defined(UNIX_AMD64_ABI) } #ifdef DEBUG // Check that the value of 'isStruct' is consistent. // A struct arg must be one of the following: // - A node of struct type, // - A GT_FIELD_LIST, or // - A node of a scalar type, passed in a single register or slot // (or two slots in the case of a struct pass on the stack as TYP_DOUBLE). // void checkIsStruct() const { GenTree* node = GetNode(); if (isStruct) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { // This is the case where we are passing a struct as a primitive type. // On most targets, this is always a single register or slot. // However, on ARM this could be two slots if it is TYP_DOUBLE. bool isPassedAsPrimitiveType = ((numRegs == 1) || ((numRegs == 0) && (GetByteSize() <= TARGET_POINTER_SIZE))); #ifdef TARGET_ARM if (!isPassedAsPrimitiveType) { if (node->TypeGet() == TYP_DOUBLE && numRegs == 0 && (numSlots == 2)) { isPassedAsPrimitiveType = true; } } #endif // TARGET_ARM assert(isPassedAsPrimitiveType); } } else { assert(!varTypeIsStruct(node)); } } void Dump() const; #endif }; //------------------------------------------------------------------------- // // The class fgArgInfo is used to handle the arguments // when morphing a GT_CALL node. // class fgArgInfo { Compiler* compiler; // Back pointer to the compiler instance so that we can allocate memory GenTreeCall* callTree; // Back pointer to the GT_CALL node for this fgArgInfo unsigned argCount; // Updatable arg count value #if defined(DEBUG_ARG_SLOTS) unsigned nextSlotNum; // Updatable slot count value #endif unsigned nextStackByteOffset; unsigned stkLevel; // Stack depth when we make this call (for x86) #if defined(UNIX_X86_ABI) bool alignmentDone; // Updateable flag, set to 'true' after we've done any required alignment. unsigned stkSizeBytes; // Size of stack used by this call, in bytes. Calculated during fgMorphArgs(). unsigned padStkAlign; // Stack alignment in bytes required before arguments are pushed for this call. 
    // Computed dynamically during codegen, based on stkSizeBytes and the current
    // stack level (genStackLevel) when the first stack adjustment is made for
    // this call.
#endif

#if FEATURE_FIXED_OUT_ARGS
    unsigned outArgSize; // Size of the out arg area for the call, will be at least MIN_ARG_AREA_FOR_CALL
#endif

    unsigned argTableSize; // size of argTable array (equal to the argCount when done with fgMorphArgs)
    bool     hasRegArgs;   // true if we have one or more register arguments
    bool     hasStackArgs; // true if we have one or more stack arguments
    bool     argsComplete; // marker for state
    bool     argsSorted;   // marker for state
    bool     needsTemps;   // one or more arguments must be copied to a temp by EvalArgsToTemps

    fgArgTabEntry** argTable; // variable sized array of per argument description: (i.e. argTable[argTableSize])

private:
    void AddArg(fgArgTabEntry* curArgTabEntry);

public:
    fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned argCount);
    fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall);

    fgArgTabEntry* AddRegArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             regNumber         regNum,
                             unsigned          numRegs,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

#ifdef UNIX_AMD64_ABI
    fgArgTabEntry* AddRegArg(unsigned                                                         argNum,
                             GenTree*                                                         node,
                             GenTreeCall::Use*                                                use,
                             regNumber                                                        regNum,
                             unsigned                                                         numRegs,
                             unsigned                                                         byteSize,
                             unsigned                                                         byteAlignment,
                             const bool                                                       isStruct,
                             const bool                                                       isFloatHfa,
                             const bool                                                       isVararg,
                             const regNumber                                                  otherRegNum,
                             const unsigned                                                   structIntRegs,
                             const unsigned                                                   structFloatRegs,
                             const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr = nullptr);
#endif // UNIX_AMD64_ABI

    fgArgTabEntry* AddStkArg(unsigned          argNum,
                             GenTree*          node,
                             GenTreeCall::Use* use,
                             unsigned          numSlots,
                             unsigned          byteSize,
                             unsigned          byteAlignment,
                             bool              isStruct,
                             bool              isFloatHfa,
                             bool              isVararg = false);

    void RemorphReset();
    void UpdateRegArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);
    void UpdateStkArg(fgArgTabEntry* argEntry, GenTree* node, bool reMorphing);

    void SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots);

    void EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode);

    void ArgsComplete();

    void SortArgs();

    void EvalArgsToTemps();

    unsigned ArgCount() const
    {
        return argCount;
    }
    fgArgTabEntry** ArgTable() const
    {
        return argTable;
    }

#if defined(DEBUG_ARG_SLOTS)
    unsigned GetNextSlotNum() const
    {
        return nextSlotNum;
    }
#endif

    unsigned GetNextSlotByteOffset() const
    {
        return nextStackByteOffset;
    }
    bool HasRegArgs() const
    {
        return hasRegArgs;
    }
    bool NeedsTemps() const
    {
        return needsTemps;
    }
    bool HasStackArgs() const
    {
        return hasStackArgs;
    }
    bool AreArgsComplete() const
    {
        return argsComplete;
    }
#if FEATURE_FIXED_OUT_ARGS
    unsigned GetOutArgSize() const
    {
        return outArgSize;
    }
    void SetOutArgSize(unsigned newVal)
    {
        outArgSize = newVal;
    }
#endif // FEATURE_FIXED_OUT_ARGS

#if defined(UNIX_X86_ABI)
    void ComputeStackAlignment(unsigned curStackLevelInBytes)
    {
        padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
    }

    unsigned GetStkAlign() const
    {
        return padStkAlign;
    }
    void SetStkSizeBytes(unsigned newStkSizeBytes)
    {
        stkSizeBytes = newStkSizeBytes;
    }
    unsigned GetStkSizeBytes() const
    {
        return stkSizeBytes;
    }
    bool IsStkAlignmentDone() const
    {
        return alignmentDone;
    }
    void SetStkAlignmentDone()
    {
        alignmentDone = true;
    }
#endif // defined(UNIX_X86_ABI)

    // Get the fgArgTabEntry for the arg at position argNum.
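    // An illustrative sketch (hypothetical usage, assuming a call whose arg info
    // has already been set up by fgMorphArgs):
    //
    //     fgArgTabEntry* entry = call->fgArgInfo->GetArgEntry(argNum);
    //     GenTree*       node  = entry->GetNode(); // the "real" argument node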
fgArgTabEntry* GetArgEntry(unsigned argNum, bool reMorphing = true) const { fgArgTabEntry* curArgTabEntry = nullptr; if (!reMorphing) { // The arg table has not yet been sorted. curArgTabEntry = argTable[argNum]; assert(curArgTabEntry->argNum == argNum); return curArgTabEntry; } for (unsigned i = 0; i < argCount; i++) { curArgTabEntry = argTable[i]; if (curArgTabEntry->argNum == argNum) { return curArgTabEntry; } } noway_assert(!"GetArgEntry: argNum not found"); return nullptr; } void SetNeedsTemps() { needsTemps = true; } // Get the node for the arg at position argIndex. // Caller must ensure that this index is a valid arg index. GenTree* GetArgNode(unsigned argIndex) const { return GetArgEntry(argIndex)->GetNode(); } void Dump(Compiler* compiler) const; }; #ifdef DEBUG // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // We have the ability to mark source expressions with "Test Labels." // These drive assertions within the JIT, or internal JIT testing. For example, we could label expressions // that should be CSE defs, and other expressions that should uses of those defs, with a shared label. enum TestLabel // This must be kept identical to System.Runtime.CompilerServices.JitTestLabel.TestLabel. { TL_SsaName, TL_VN, // Defines a "VN equivalence class". (For full VN, including exceptions thrown). TL_VNNorm, // Like above, but uses the non-exceptional value of the expression. TL_CSE_Def, // This must be identified in the JIT as a CSE def TL_CSE_Use, // This must be identified in the JIT as a CSE use TL_LoopHoist, // Expression must (or must not) be hoisted out of the loop. }; struct TestLabelAndNum { TestLabel m_tl; ssize_t m_num; TestLabelAndNum() : m_tl(TestLabel(0)), m_num(0) { } }; typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap; // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // DEBUG //------------------------------------------------------------------------- // LoopFlags: flags for the loop table. // enum LoopFlags : unsigned short { LPFLG_EMPTY = 0, // LPFLG_UNUSED = 0x0001, // LPFLG_UNUSED = 0x0002, LPFLG_ITER = 0x0004, // loop of form: for (i = icon or lclVar; test_condition(); i++) // LPFLG_UNUSED = 0x0008, LPFLG_CONTAINS_CALL = 0x0010, // If executing the loop body *may* execute a call LPFLG_VAR_INIT = 0x0020, // iterator is initialized with a local var (var # found in lpVarInit) LPFLG_CONST_INIT = 0x0040, // iterator is initialized with a constant (found in lpConstInit) LPFLG_SIMD_LIMIT = 0x0080, // iterator is compared with vector element count (found in lpConstLimit) LPFLG_VAR_LIMIT = 0x0100, // iterator is compared with a local var (var # found in lpVarLimit) LPFLG_CONST_LIMIT = 0x0200, // iterator is compared with a constant (found in lpConstLimit) LPFLG_ARRLEN_LIMIT = 0x0400, // iterator is compared with a.len or a[i].len (found in lpArrLenLimit) LPFLG_HAS_PREHEAD = 0x0800, // lpHead is known to be a preHead for this loop LPFLG_REMOVED = 0x1000, // has been removed from the loop table (unrolled or optimized away) LPFLG_DONT_UNROLL = 0x2000, // do not unroll this loop LPFLG_ASGVARS_YES = 0x4000, // "lpAsgVars" has been computed LPFLG_ASGVARS_INC = 0x8000, // "lpAsgVars" is incomplete -- vars beyond those representable in an AllVarSet // type are assigned to. 
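
    // The constexpr bitwise operators defined just below this enum let the flags be
    // combined and tested type-safely; e.g. (hypothetical usage):
    //
    //     LoopFlags flags = LPFLG_ITER | LPFLG_CONST_INIT;
    //     if ((flags & LPFLG_ITER) != LPFLG_EMPTY) { ... }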
}; inline constexpr LoopFlags operator~(LoopFlags a) { return (LoopFlags)(~(unsigned short)a); } inline constexpr LoopFlags operator|(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a | (unsigned short)b); } inline constexpr LoopFlags operator&(LoopFlags a, LoopFlags b) { return (LoopFlags)((unsigned short)a & (unsigned short)b); } inline LoopFlags& operator|=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a | (unsigned short)b); } inline LoopFlags& operator&=(LoopFlags& a, LoopFlags b) { return a = (LoopFlags)((unsigned short)a & (unsigned short)b); } // The following holds information about instr offsets in terms of generated code. enum class IPmappingDscKind { Prolog, // The mapping represents the start of a prolog. Epilog, // The mapping represents the start of an epilog. NoMapping, // This does not map to any IL offset. Normal, // The mapping maps to an IL offset. }; struct IPmappingDsc { emitLocation ipmdNativeLoc; // the emitter location of the native code corresponding to the IL offset IPmappingDscKind ipmdKind; // The kind of mapping ILLocation ipmdLoc; // The location for normal mappings bool ipmdIsLabel; // Can this code be a branch label? }; struct PreciseIPMapping { emitLocation nativeLoc; DebugInfo debugInfo; }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX The big guy. The sections are currently organized as : XX XX XX XX o GenTree and BasicBlock XX XX o LclVarsInfo XX XX o Importer XX XX o FlowGraph XX XX o Optimizer XX XX o RegAlloc XX XX o EEInterface XX XX o TempsInfo XX XX o RegSet XX XX o GCInfo XX XX o Instruction XX XX o ScopeInfo XX XX o PrologScopeInfo XX XX o CodeGenerator XX XX o UnwindInfo XX XX o Compiler XX XX o typeInfo XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ struct HWIntrinsicInfo; class Compiler { friend class emitter; friend class UnwindInfo; friend class UnwindFragmentInfo; friend class UnwindEpilogInfo; friend class JitTimer; friend class LinearScan; friend class fgArgInfo; friend class Rationalizer; friend class Phase; friend class Lowering; friend class CSE_DataFlow; friend class CSE_Heuristic; friend class CodeGenInterface; friend class CodeGen; friend class LclVarDsc; friend class TempDsc; friend class LIR; friend class ObjectAllocator; friend class LocalAddressVisitor; friend struct GenTree; friend class MorphInitBlockHelper; friend class MorphCopyBlockHelper; #ifdef FEATURE_HW_INTRINSICS friend struct HWIntrinsicInfo; #endif // FEATURE_HW_INTRINSICS #ifndef TARGET_64BIT friend class DecomposeLongs; #endif // !TARGET_64BIT /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Misc structs definitions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: hashBvGlobalData hbvGlobalData; // Used by the hashBv bitvector package. #ifdef DEBUG bool verbose; bool verboseTrees; bool shouldUseVerboseTrees(); bool asciiTrees; // If true, dump trees using only ASCII characters bool shouldDumpASCIITrees(); bool verboseSsa; // If true, produce especially verbose dump output in SSA construction. 
    bool shouldUseVerboseSsa();
    bool treesBeforeAfterMorph; // If true, print trees before/after morphing (paired by an intra-compilation id:
    int  morphNum;              // This counts the trees that have been morphed, allowing us to label each uniquely.
    bool doExtraSuperPmiQueries;
    void makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level); // Make queries recursively 'level' deep.

    const char* VarNameToStr(VarName name)
    {
        return name;
    }

    DWORD expensiveDebugCheckLevel;
#endif

#if FEATURE_MULTIREG_RET
    GenTree* impAssignMultiRegTypeToVar(GenTree*             op,
                                        CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv));
#endif // FEATURE_MULTIREG_RET

#ifdef TARGET_X86
    bool isTrivialPointerSizedStruct(CORINFO_CLASS_HANDLE clsHnd) const;
#endif // TARGET_X86

    //-------------------------------------------------------------------------
    // Functions to handle homogeneous floating-point aggregates (HFAs) in ARM/ARM64.
    // HFAs are one to four element structs where each element is the same
    // type, either all float or all double. We handle HVAs (one to four elements of
    // vector types) uniformly with HFAs. HFAs are treated specially
    // in the ARM/ARM64 Procedure Call Standards, specifically, they are passed in
    // floating-point registers instead of the general purpose registers.
    //

    bool IsHfa(CORINFO_CLASS_HANDLE hClass);
    bool IsHfa(GenTree* tree);

    var_types GetHfaType(GenTree* tree);
    unsigned GetHfaCount(GenTree* tree);

    var_types GetHfaType(CORINFO_CLASS_HANDLE hClass);
    unsigned GetHfaCount(CORINFO_CLASS_HANDLE hClass);

    bool IsMultiRegReturnedType(CORINFO_CLASS_HANDLE hClass, CorInfoCallConvExtension callConv);

    //-------------------------------------------------------------------------
    // The following is used for validating format of EH table
    //

    struct EHNodeDsc;
    typedef struct EHNodeDsc* pEHNodeDsc;

    EHNodeDsc* ehnTree; // root of the tree comprising the EHnodes.
    EHNodeDsc* ehnNext; // next available EHNodeDsc in the pre-allocated pool (see verInitEHTree).

    struct EHNodeDsc
    {
        enum EHBlockType
        {
            TryNode,
            FilterNode,
            HandlerNode,
            FinallyNode,
            FaultNode
        };

        EHBlockType ehnBlockType;   // kind of EH block
        IL_OFFSET   ehnStartOffset; // IL offset of start of the EH block
        IL_OFFSET ehnEndOffset; // IL offset past end of the EH block. (TODO: looks like verInsertEhNode() sets this to
                                // the last IL offset, not "one past the last one", i.e., the range Start to End is
                                // inclusive).
pEHNodeDsc ehnNext; // next (non-nested) block in sequential order pEHNodeDsc ehnChild; // leftmost nested block union { pEHNodeDsc ehnTryNode; // for filters and handlers, the corresponding try node pEHNodeDsc ehnHandlerNode; // for a try node, the corresponding handler node }; pEHNodeDsc ehnFilterNode; // if this is a try node and has a filter, otherwise 0 pEHNodeDsc ehnEquivalent; // if blockType=tryNode, start offset and end offset is same, void ehnSetTryNodeType() { ehnBlockType = TryNode; } void ehnSetFilterNodeType() { ehnBlockType = FilterNode; } void ehnSetHandlerNodeType() { ehnBlockType = HandlerNode; } void ehnSetFinallyNodeType() { ehnBlockType = FinallyNode; } void ehnSetFaultNodeType() { ehnBlockType = FaultNode; } bool ehnIsTryBlock() { return ehnBlockType == TryNode; } bool ehnIsFilterBlock() { return ehnBlockType == FilterNode; } bool ehnIsHandlerBlock() { return ehnBlockType == HandlerNode; } bool ehnIsFinallyBlock() { return ehnBlockType == FinallyNode; } bool ehnIsFaultBlock() { return ehnBlockType == FaultNode; } // returns true if there is any overlap between the two nodes static bool ehnIsOverlap(pEHNodeDsc node1, pEHNodeDsc node2) { if (node1->ehnStartOffset < node2->ehnStartOffset) { return (node1->ehnEndOffset >= node2->ehnStartOffset); } else { return (node1->ehnStartOffset <= node2->ehnEndOffset); } } // fails with BADCODE if inner is not completely nested inside outer static bool ehnIsNested(pEHNodeDsc inner, pEHNodeDsc outer) { return ((inner->ehnStartOffset >= outer->ehnStartOffset) && (inner->ehnEndOffset <= outer->ehnEndOffset)); } }; //------------------------------------------------------------------------- // Exception handling functions // #if !defined(FEATURE_EH_FUNCLETS) bool ehNeedsShadowSPslots() { return (info.compXcptnsCount || opts.compDbgEnC); } // 0 for methods with no EH // 1 for methods with non-nested EH, or where only the try blocks are nested // 2 for a method with a catch within a catch // etc. unsigned ehMaxHndNestingCount; #endif // !FEATURE_EH_FUNCLETS static bool jitIsBetween(unsigned value, unsigned start, unsigned end); static bool jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end); bool bbInCatchHandlerILRange(BasicBlock* blk); bool bbInFilterILRange(BasicBlock* blk); bool bbInTryRegions(unsigned regionIndex, BasicBlock* blk); bool bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk); bool bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk); bool bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk); unsigned short bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo); unsigned short bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex); unsigned short bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex); // Returns true if "block" is the start of a try region. bool bbIsTryBeg(BasicBlock* block); // Returns true if "block" is the start of a handler or filter region. bool bbIsHandlerBeg(BasicBlock* block); // Returns true iff "block" is where control flows if an exception is raised in the // try region, and sets "*regionIndex" to the index of the try for the handler. // Differs from "IsHandlerBeg" in the case of filters, where this is true for the first // block of the filter, but not for the filter's handler. bool bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex); bool ehHasCallableHandlers(); // Return the EH descriptor for the given region index. EHblkDsc* ehGetDsc(unsigned regionIndex); // Return the EH index given a region descriptor. 
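    // (Illustrative invariant, assuming 'i' is a valid region index:
    //  ehGetIndex(ehGetDsc(i)) == i.)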
    unsigned ehGetIndex(EHblkDsc* ehDsc);

    // Return the EH descriptor index of the enclosing try, for the given region index.
    unsigned ehGetEnclosingTryIndex(unsigned regionIndex);

    // Return the EH descriptor index of the enclosing handler, for the given region index.
    unsigned ehGetEnclosingHndIndex(unsigned regionIndex);

    // Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of (or nullptr if this
    // block is not in a 'try' region).
    EHblkDsc* ehGetBlockTryDsc(BasicBlock* block);

    // Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of (or nullptr
    // if this block is not in a filter or handler region).
    EHblkDsc* ehGetBlockHndDsc(BasicBlock* block);

    // Return the EH descriptor for the most nested region that may handle exceptions raised in this BasicBlock (or
    // nullptr if this block's exceptions propagate to caller).
    EHblkDsc* ehGetBlockExnFlowDsc(BasicBlock* block);

    EHblkDsc* ehIsBlockTryLast(BasicBlock* block);
    EHblkDsc* ehIsBlockHndLast(BasicBlock* block);
    bool ehIsBlockEHLast(BasicBlock* block);

    bool ehBlockHasExnFlowDsc(BasicBlock* block);

    // Return the region index of the most nested EH region this block is in.
    unsigned ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion);

    // Find the true enclosing try index, ignoring 'mutual protect' try. Uses IL ranges to check.
    unsigned ehTrueEnclosingTryIndexIL(unsigned regionIndex);

    // Return the index of the most nested enclosing region for a particular EH region. Returns NO_ENCLOSING_INDEX
    // if there is no enclosing region. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion'
    // is set to 'true' if the enclosing region is a 'try', or 'false' if the enclosing region is a handler.
    // (It can never be a filter.)
    unsigned ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion);

    // A block has been deleted. Update the EH table appropriately.
    void ehUpdateForDeletedBlock(BasicBlock* block);

    // Determine whether a block can be deleted while preserving the EH normalization rules.
    bool ehCanDeleteEmptyBlock(BasicBlock* block);

    // Update the 'last' pointers in the EH table to reflect new or deleted blocks in an EH region.
    void ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast);

    // For a finally handler, find the region index that the BBJ_CALLFINALLY lives in that calls the handler,
    // or NO_ENCLOSING_INDEX if the BBJ_CALLFINALLY lives in the main function body. Normally, the index
    // is the same index as the handler (and the BBJ_CALLFINALLY lives in the 'try' region), but for AMD64 the
    // BBJ_CALLFINALLY lives in the enclosing try or handler region, whichever is more nested, or the main function
    // body. If the returned index is not NO_ENCLOSING_INDEX, then '*inTryRegion' is set to 'true' if the
    // BBJ_CALLFINALLY lives in the returned index's 'try' region, or 'false' if it lives in the handler region. (It
    // never lives in a filter.)
    unsigned ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion);

    // Find the range of basic blocks in which all BBJ_CALLFINALLY will be found that target the 'finallyIndex'
    // region's handler. Set begBlk to the first block, and endBlk to the block after the last block of the range
    // (nullptr if the last block is the last block in the program).
    // Precondition: 'finallyIndex' is the EH region of a try/finally clause.
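    //
    // An illustrative sketch (hypothetical usage) of walking the returned range:
    //
    //     BasicBlock* begBlk;
    //     BasicBlock* endBlk;
    //     ehGetCallFinallyBlockRange(finallyIndex, &begBlk, &endBlk);
    //     for (BasicBlock* blk = begBlk; blk != endBlk; blk = blk->bbNext)
    //     {
    //         // e.g. look for BBJ_CALLFINALLY blocks targeting the handler
    //     }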
    void ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk);

#ifdef DEBUG
    // Given a BBJ_CALLFINALLY block and the EH region index of the finally it is calling, return
    // 'true' if the BBJ_CALLFINALLY is in the correct EH region.
    bool ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex);
#endif // DEBUG

#if defined(FEATURE_EH_FUNCLETS)
    // Do we need a PSPSym in the main function? For codegen purposes, we only need one
    // if there is a filter that protects a region with a nested EH clause (such as a
    // try/catch nested in the 'try' body of a try/filter/filter-handler). See
    // genFuncletProlog() for more details. However, the VM seems to use it for more
    // purposes, maybe including debugging. Until we are sure otherwise, always create
    // a PSPSym for functions with any EH.
    bool ehNeedsPSPSym() const
    {
#ifdef TARGET_X86
        return false;
#else  // TARGET_X86
        return compHndBBtabCount > 0;
#endif // TARGET_X86
    }

    bool     ehAnyFunclets();  // Are there any funclets in this function?
    unsigned ehFuncletCount(); // Return the count of funclets in the function

    unsigned bbThrowIndex(BasicBlock* blk); // Get the index to use as the cache key for sharing throw blocks

#else // !FEATURE_EH_FUNCLETS

    bool ehAnyFunclets()
    {
        return false;
    }
    unsigned ehFuncletCount()
    {
        return 0;
    }

    unsigned bbThrowIndex(BasicBlock* blk)
    {
        return blk->bbTryIndex;
    } // Get the index to use as the cache key for sharing throw blocks

#endif // !FEATURE_EH_FUNCLETS

    // Returns a flowList representing the "EH predecessors" of "blk". These are the normal predecessors of
    // "blk", plus one special case: if "blk" is the first block of a handler, considers the predecessor(s) of the
    // first block of the corresponding try region to be "EH predecessors". (If there is a single such predecessor,
    // for example, we want to consider it the immediate dominator of the catch clause start block, so it's
    // convenient to also consider it a predecessor.)
    flowList* BlockPredsWithEH(BasicBlock* blk);

    // This table is useful for memoization of the method above.
    typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
    BlockToFlowListMap* m_blockToEHPreds;
    BlockToFlowListMap* GetBlockToEHPreds()
    {
        if (m_blockToEHPreds == nullptr)
        {
            m_blockToEHPreds = new (getAllocator()) BlockToFlowListMap(getAllocator());
        }
        return m_blockToEHPreds;
    }

    void* ehEmitCookie(BasicBlock* block);
    UNATIVE_OFFSET ehCodeOffset(BasicBlock* block);

    EHblkDsc* ehInitHndRange(BasicBlock* src, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter);

    EHblkDsc* ehInitTryRange(BasicBlock* src, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd);

    EHblkDsc* ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter);

    EHblkDsc* ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast);

    void fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg);

    void fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast);

    void fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast);

    void fgSkipRmvdBlocks(EHblkDsc* handlerTab);

    void fgAllocEHTable();

    void fgRemoveEHTableEntry(unsigned XTnum);

#if defined(FEATURE_EH_FUNCLETS)

    EHblkDsc* fgAddEHTableEntry(unsigned XTnum);

#endif // FEATURE_EH_FUNCLETS

#if !FEATURE_EH
    void fgRemoveEH();
#endif // !FEATURE_EH

    void fgSortEHTable();

    // Causes the EH table to obey some well-formedness conditions, by inserting
    // empty BB's when necessary:
    //  * No block is both the first block of a handler and the first block of a try.
// * No block is the first block of multiple 'try' regions. // * No block is the last block of multiple EH regions. void fgNormalizeEH(); bool fgNormalizeEHCase1(); bool fgNormalizeEHCase2(); bool fgNormalizeEHCase3(); void fgCheckForLoopsInHandlers(); #ifdef DEBUG void dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause); void fgVerifyHandlerTab(); void fgDispHandlerTab(); #endif // DEBUG bool fgNeedToSortEHTable; void verInitEHTree(unsigned numEHClauses); void verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab); void verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node); void verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node); void verCheckNestingLevel(EHNodeDsc* initRoot); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GenTree and BasicBlock XX XX XX XX Functions to allocate and display the GenTrees and BasicBlocks XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Functions to create nodes Statement* gtNewStmt(GenTree* expr = nullptr); Statement* gtNewStmt(GenTree* expr, const DebugInfo& di); // For unary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications = TRUE); // For binary opers. GenTree* gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2); GenTreeColon* gtNewColonNode(var_types type, GenTree* elseNode, GenTree* thenNode); GenTreeQmark* gtNewQmarkNode(var_types type, GenTree* cond, GenTreeColon* colon); GenTree* gtNewLargeOperNode(genTreeOps oper, var_types type = TYP_I_IMPL, GenTree* op1 = nullptr, GenTree* op2 = nullptr); GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT); GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq); GenTreeIntCon* gtNewNull(); GenTreeIntCon* gtNewTrue(); GenTreeIntCon* gtNewFalse(); GenTree* gtNewPhysRegNode(regNumber reg, var_types type); GenTree* gtNewJmpTableNode(); GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant); GenTree* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr); GenTreeFlags gtTokenToIconFlags(unsigned token); GenTree* gtNewIconEmbHndNode(void* value, void* pValue, GenTreeFlags flags, void* compileTimeHandle); GenTree* gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd); GenTree* gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd); GenTree* gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd); GenTree* gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd); GenTree* gtNewStringLiteralNode(InfoAccessType iat, void* pValue); GenTreeIntCon* gtNewStringLiteralLength(GenTreeStrCon* node); GenTree* gtNewLconNode(__int64 value); GenTree* gtNewDconNode(double value, var_types type = TYP_DOUBLE); GenTree* gtNewSconNode(int CPX, CORINFO_MODULE_HANDLE scpHandle); GenTree* gtNewZeroConNode(var_types type); GenTree* gtNewOneConNode(var_types type); GenTreeLclVar* gtNewStoreLclVar(unsigned dstLclNum, GenTree* src); #ifdef FEATURE_SIMD GenTree* gtNewSIMDVectorZero(var_types simdType, CorInfoType simdBaseJitType, unsigned simdSize); #endif GenTree* gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock); GenTree* gtNewPutArgReg(var_types type, GenTree* arg, 
regNumber argReg); GenTree* gtNewBitCastNode(var_types type, GenTree* arg); protected: void gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile); public: GenTreeObj* gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); void gtSetObjGcInfo(GenTreeObj* objNode); GenTree* gtNewStructVal(CORINFO_CLASS_HANDLE structHnd, GenTree* addr); GenTree* gtNewBlockVal(GenTree* addr, unsigned size); GenTree* gtNewCpObjNode(GenTree* dst, GenTree* src, CORINFO_CLASS_HANDLE structHnd, bool isVolatile); GenTreeCall::Use* gtNewCallArgs(GenTree* node); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3); GenTreeCall::Use* gtNewCallArgs(GenTree* node1, GenTree* node2, GenTree* node3, GenTree* node4); GenTreeCall::Use* gtPrependNewCallArg(GenTree* node, GenTreeCall::Use* args); GenTreeCall::Use* gtInsertNewCallArgAfter(GenTree* node, GenTreeCall::Use* after); GenTreeCall* gtNewCallNode(gtCallTypes callType, CORINFO_METHOD_HANDLE handle, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewIndCallNode(GenTree* addr, var_types type, GenTreeCall::Use* args, const DebugInfo& di = DebugInfo()); GenTreeCall* gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args = nullptr); GenTreeCall* gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle); GenTreeLclVar* gtNewLclvNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclLNode(unsigned lnum, var_types type DEBUGARG(IL_OFFSET offs = BAD_IL_OFFSET)); GenTreeLclVar* gtNewLclVarAddrNode(unsigned lclNum, var_types type = TYP_I_IMPL); GenTreeLclFld* gtNewLclFldAddrNode(unsigned lclNum, unsigned lclOffs, FieldSeqNode* fieldSeq, var_types type = TYP_I_IMPL); #ifdef FEATURE_SIMD GenTreeSIMD* gtNewSIMDNode( var_types type, GenTree* op1, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); GenTreeSIMD* gtNewSIMDNode(var_types type, GenTree* op1, GenTree* op2, SIMDIntrinsicID simdIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize); void SetOpLclRelatedToSIMDIntrinsic(GenTree* op); #endif #ifdef FEATURE_HW_INTRINSICS GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, GenTree* op4, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdHWIntrinsicNode(var_types type, GenTree** operands, size_t operandCount, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* 
gtNewSimdHWIntrinsicNode(var_types type, IntrinsicNodeBuilder&& nodeBuilder, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic = false); GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode( var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTreeHWIntrinsic* gtNewSimdAsHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID, CorInfoType simdBaseJitType, unsigned simdSize) { bool isSimdAsHWIntrinsic = true; return gtNewSimdHWIntrinsicNode(type, op1, op2, op3, hwIntrinsicID, simdBaseJitType, simdSize, isSimdAsHWIntrinsic); } GenTree* gtNewSimdAbsNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdBinOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCeilNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAllNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCmpOpAnyNode(genTreeOps op, var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCndSelNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdCreateBroadcastNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdDotProdNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdFloorNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdGetElementNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMaxNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdMinNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdNarrowNode(var_types type, GenTree* op1, GenTree* op2, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSqrtNode( 
var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdSumNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdUnOpNode(genTreeOps op, var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenLowerNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWidenUpperNode( var_types type, GenTree* op1, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdWithElementNode(var_types type, GenTree* op1, GenTree* op2, GenTree* op3, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTree* gtNewSimdZeroNode(var_types type, CorInfoType simdBaseJitType, unsigned simdSize, bool isSimdAsHWIntrinsic); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode(var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic hwIntrinsicID); GenTreeHWIntrinsic* gtNewScalarHWIntrinsicNode( var_types type, GenTree* op1, GenTree* op2, GenTree* op3, NamedIntrinsic hwIntrinsicID); CORINFO_CLASS_HANDLE gtGetStructHandleForHWSIMD(var_types simdType, CorInfoType simdBaseJitType); CorInfoType getBaseJitTypeFromArgIfNeeded(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType); #endif // FEATURE_HW_INTRINSICS GenTree* gtNewMustThrowException(unsigned helper, var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTreeLclFld* gtNewLclFldNode(unsigned lnum, var_types type, unsigned offset); GenTree* gtNewInlineCandidateReturnExpr(GenTree* inlineCandidate, var_types type, BasicBlockFlags bbFlags); GenTreeField* gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj = nullptr, DWORD offset = 0); GenTree* gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp); GenTreeArrLen* gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block); GenTreeIndir* gtNewIndir(var_types typ, GenTree* addr); GenTree* gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock); var_types gtTypeForNullCheck(GenTree* tree); void gtChangeOperToNullCheck(GenTree* tree, BasicBlock* block); static fgArgTabEntry* gtArgEntryByArgNum(GenTreeCall* call, unsigned argNum); static fgArgTabEntry* gtArgEntryByNode(GenTreeCall* call, GenTree* node); fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx); static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx); GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src); GenTree* gtNewTempAssign(unsigned tmp, GenTree* val, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* gtNewRefCOMfield(GenTree* objPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp, CORINFO_CLASS_HANDLE structType, GenTree* assg); GenTree* gtNewNothingNode(); GenTree* gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd); GenTree* gtUnusedValNode(GenTree* expr); GenTree* gtNewKeepAliveNode(GenTree* op); GenTreeCast* gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType); GenTreeCast* gtNewCastNodeL(var_types typ, 
GenTree* op1, bool fromUnsigned, var_types castType); GenTreeAllocObj* gtNewAllocObjNode( unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1); GenTreeAllocObj* gtNewAllocObjNode(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool useParent); GenTree* gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* lookupTree); GenTreeIndir* gtNewMethodTableLookup(GenTree* obj); //------------------------------------------------------------------------ // Other GenTree functions GenTree* gtClone(GenTree* tree, bool complexOK = false); // If `tree` is a lclVar with lclNum `varNum`, return an IntCns with value `varVal`; otherwise, // create a copy of `tree`, adding specified flags, replacing uses of lclVar `deepVarNum` with // IntCnses with value `deepVarVal`. GenTree* gtCloneExpr( GenTree* tree, GenTreeFlags addFlags, unsigned varNum, int varVal, unsigned deepVarNum, int deepVarVal); // Create a copy of `tree`, optionally adding specified flags, and optionally mapping uses of local // `varNum` to int constants with value `varVal`. GenTree* gtCloneExpr(GenTree* tree, GenTreeFlags addFlags = GTF_EMPTY, unsigned varNum = BAD_VAR_NUM, int varVal = 0) { return gtCloneExpr(tree, addFlags, varNum, varVal, varNum, varVal); } Statement* gtCloneStmt(Statement* stmt) { GenTree* exprClone = gtCloneExpr(stmt->GetRootNode()); return gtNewStmt(exprClone, stmt->GetDebugInfo()); } // Internal helper for cloning a call GenTreeCall* gtCloneExprCallHelper(GenTreeCall* call, GenTreeFlags addFlags = GTF_EMPTY, unsigned deepVarNum = BAD_VAR_NUM, int deepVarVal = 0); // Create copy of an inline or guarded devirtualization candidate tree. GenTreeCall* gtCloneCandidateCall(GenTreeCall* call); void gtUpdateSideEffects(Statement* stmt, GenTree* tree); void gtUpdateTreeAncestorsSideEffects(GenTree* tree); void gtUpdateStmtSideEffects(Statement* stmt); void gtUpdateNodeSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffects(GenTree* tree); void gtUpdateNodeOperSideEffectsPost(GenTree* tree); // Returns "true" iff the complexity (not formally defined, but first interpretation // is #of nodes in subtree) of "tree" is greater than "limit". // (This is somewhat redundant with the "GetCostEx()/GetCostSz()" fields, but can be used // before they have been set.) bool gtComplexityExceeds(GenTree** tree, unsigned limit); GenTree* gtReverseCond(GenTree* tree); static bool gtHasRef(GenTree* tree, ssize_t lclNum); bool gtHasLocalsWithAddrOp(GenTree* tree); unsigned gtSetCallArgsOrder(const GenTreeCall::UseList& args, bool lateArgs, int* callCostEx, int* callCostSz); unsigned gtSetMultiOpOrder(GenTreeMultiOp* multiOp); void gtWalkOp(GenTree** op1, GenTree** op2, GenTree* base, bool constOnly); #ifdef DEBUG unsigned gtHashValue(GenTree* tree); GenTree* gtWalkOpEffectiveVal(GenTree* op); #endif void gtPrepareCost(GenTree* tree); bool gtIsLikelyRegVar(GenTree* tree); // Returns true iff the secondNode can be swapped with firstNode. bool gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode); // Given an address expression, compute its costs and addressing mode opportunities, // and mark addressing mode candidates as GTF_DONT_CSE. // TODO-Throughput - Consider actually instantiating these early, to avoid // having to re-run the algorithm that looks for them (might also improve CQ).
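// (Editorial illustration, not part of the original header: on xarch the
// recognized shape is [base + index*scale + offset] with scale in {1, 2, 4, 8},
// so an address tree such as ADD(lclVar, LSH(index, 2)) can typically fold into
// a single addressing mode; marking its components GTF_DONT_CSE keeps CSE from
// pulling the pattern apart before codegen can consume it.)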
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type); unsigned gtSetEvalOrder(GenTree* tree); void gtSetStmtInfo(Statement* stmt); // Returns "true" iff "node" has any of the side effects in "flags". bool gtNodeHasSideEffects(GenTree* node, GenTreeFlags flags); // Returns "true" iff "tree" or its (transitive) children have any of the side effects in "flags". bool gtTreeHasSideEffects(GenTree* tree, GenTreeFlags flags); // Appends 'expr' in front of 'list' // 'list' will typically start off as 'nullptr' // when 'list' is non-null a GT_COMMA node is used to insert 'expr' GenTree* gtBuildCommaList(GenTree* list, GenTree* expr); void gtExtractSideEffList(GenTree* expr, GenTree** pList, GenTreeFlags GenTreeFlags = GTF_SIDE_EFFECT, bool ignoreRoot = false); GenTree* gtGetThisArg(GenTreeCall* call); // Static fields of struct types (and sometimes the types that those are reduced to) are represented by having the // static field contain an object pointer to the boxed struct. This simplifies the GC implementation...but // complicates the JIT somewhat. This predicate returns "true" iff a node with type "fieldNodeType", representing // the given "fldHnd", is such an object pointer. bool gtIsStaticFieldPtrToBoxedStruct(var_types fieldNodeType, CORINFO_FIELD_HANDLE fldHnd); // Return true if call is a recursive call; return false otherwise. // Note when inlining, this looks for calls back to the root method. bool gtIsRecursiveCall(GenTreeCall* call) { return gtIsRecursiveCall(call->gtCallMethHnd); } bool gtIsRecursiveCall(CORINFO_METHOD_HANDLE callMethodHandle) { return (callMethodHandle == impInlineRoot()->info.compMethodHnd); } //------------------------------------------------------------------------- GenTree* gtFoldExpr(GenTree* tree); GenTree* gtFoldExprConst(GenTree* tree); GenTree* gtFoldExprSpecial(GenTree* tree); GenTree* gtFoldBoxNullable(GenTree* tree); GenTree* gtFoldExprCompare(GenTree* tree); GenTree* gtCreateHandleCompare(genTreeOps oper, GenTree* op1, GenTree* op2, CorInfoInlineTypeCheck typeCheckInliningResult); GenTree* gtFoldExprCall(GenTreeCall* call); GenTree* gtFoldTypeCompare(GenTree* tree); GenTree* gtFoldTypeEqualityCall(bool isEq, GenTree* op1, GenTree* op2); // Options to control behavior of gtTryRemoveBoxUpstreamEffects enum BoxRemovalOptions { BR_REMOVE_AND_NARROW, // remove effects, minimize remaining work, return possibly narrowed source tree BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE, // remove effects and minimize remaining work, return type handle tree BR_REMOVE_BUT_NOT_NARROW, // remove effects, return original source tree BR_DONT_REMOVE, // check if removal is possible, return copy source tree BR_DONT_REMOVE_WANT_TYPE_HANDLE, // check if removal is possible, return type handle tree BR_MAKE_LOCAL_COPY // revise box to copy to temp local and return local's address }; GenTree* gtTryRemoveBoxUpstreamEffects(GenTree* tree, BoxRemovalOptions options = BR_REMOVE_AND_NARROW); GenTree* gtOptimizeEnumHasFlag(GenTree* thisOp, GenTree* flagOp); //------------------------------------------------------------------------- // Get the handle, if any. CORINFO_CLASS_HANDLE gtGetStructHandleIfPresent(GenTree* tree); // Get the handle, and assert if not found. CORINFO_CLASS_HANDLE gtGetStructHandle(GenTree* tree); // Get the handle for a ref type. 
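// (Editorial illustration, not part of the original header: a typical query
// pattern over these handle getters is
//     bool isExact = false, isNonNull = false;
//     CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(thisObj, &isExact, &isNonNull);
//     if ((clsHnd != NO_CLASS_HANDLE) && isExact) { /* candidate for devirtualization */ }
// where `thisObj` stands for any GenTree* of ref type.)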
CORINFO_CLASS_HANDLE gtGetClassHandle(GenTree* tree, bool* pIsExact, bool* pIsNonNull); // Get the class handle for a helper call CORINFO_CLASS_HANDLE gtGetHelperCallClassHandle(GenTreeCall* call, bool* pIsExact, bool* pIsNonNull); // Get the element handle for an array of ref type. CORINFO_CLASS_HANDLE gtGetArrayElementClassHandle(GenTree* array); // Get a class handle from a helper call argument CORINFO_CLASS_HANDLE gtGetHelperArgClassHandle(GenTree* array); // Get the class handle for a field CORINFO_CLASS_HANDLE gtGetFieldClassHandle(CORINFO_FIELD_HANDLE fieldHnd, bool* pIsExact, bool* pIsNonNull); // Check if this tree is a gc static base helper call bool gtIsStaticGCBaseHelperCall(GenTree* tree); //------------------------------------------------------------------------- // Functions to display the trees #ifdef DEBUG void gtDispNode(GenTree* tree, IndentStack* indentStack, _In_z_ const char* msg, bool isLIR); void gtDispConst(GenTree* tree); void gtDispLeaf(GenTree* tree, IndentStack* indentStack); void gtDispNodeName(GenTree* tree); #if FEATURE_MULTIREG_RET unsigned gtDispMultiRegCount(GenTree* tree); #endif void gtDispRegVal(GenTree* tree); void gtDispZeroFieldSeq(GenTree* tree); void gtDispVN(GenTree* tree); void gtDispCommonEndLine(GenTree* tree); enum IndentInfo { IINone, IIArc, IIArcTop, IIArcBottom, IIEmbedded, IIError, IndentInfoCount }; void gtDispChild(GenTree* child, IndentStack* indentStack, IndentInfo arcType, _In_opt_ const char* msg = nullptr, bool topOnly = false); void gtDispTree(GenTree* tree, IndentStack* indentStack = nullptr, _In_opt_ const char* msg = nullptr, bool topOnly = false, bool isLIR = false); void gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, const char** ilNameOut, unsigned* ilNumOut); int gtGetLclVarName(unsigned lclNum, char* buf, unsigned buf_remaining); char* gtGetLclVarName(unsigned lclNum); void gtDispLclVar(unsigned lclNum, bool padForBiggestDisp = true); void gtDispLclVarStructType(unsigned lclNum); void gtDispClassLayout(ClassLayout* layout, var_types type); void gtDispILLocation(const ILLocation& loc); void gtDispStmt(Statement* stmt, const char* msg = nullptr); void gtDispBlockStmts(BasicBlock* block); void gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, char* bufp, unsigned bufLength); void gtGetLateArgMsg(GenTreeCall* call, GenTree* arg, int argNum, char* bufp, unsigned bufLength); void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack); void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq); void gtDispFieldSeq(FieldSeqNode* pfsn); void gtDispRange(LIR::ReadOnlyRange const& range); void gtDispTreeRange(LIR::Range& containingRange, GenTree* tree); void gtDispLIRNode(GenTree* node, const char* prefixMsg = nullptr); #endif // For tree walks enum fgWalkResult { WALK_CONTINUE, WALK_SKIP_SUBTREES, WALK_ABORT }; struct fgWalkData; typedef fgWalkResult(fgWalkPreFn)(GenTree** pTree, fgWalkData* data); typedef fgWalkResult(fgWalkPostFn)(GenTree** pTree, fgWalkData* data); static fgWalkPreFn gtMarkColonCond; static fgWalkPreFn gtClearColonCond; struct FindLinkData { GenTree* nodeToFind; GenTree** result; GenTree* parent; }; FindLinkData gtFindLink(Statement* stmt, GenTree* node); bool gtHasCatchArg(GenTree* tree); typedef ArrayStack<GenTree*> GenTreeStack; static bool gtHasCallOnStack(GenTreeStack* parentStack); //========================================================================= // BasicBlock functions #ifdef DEBUG // This is a debug flag we will use to assert when creating block
during codegen // as this interferes with procedure splitting. If you know what you're doing, set // it to true before creating the block. (DEBUG only) bool fgSafeBasicBlockCreation; #endif BasicBlock* bbNewBasicBlock(BBjumpKinds jumpKind); void placeLoopAlignInstructions(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // // For both PROMOTION_TYPE_NONE and PROMOTION_TYPE_DEPENDENT the struct will // be placed in the stack frame and its fields must be laid out sequentially. // // For PROMOTION_TYPE_INDEPENDENT each of the struct's fields is replaced by // a local variable that can be enregistered or placed in the stack frame. // The fields do not need to be laid out sequentially // enum lvaPromotionType { PROMOTION_TYPE_NONE, // The struct local is not promoted PROMOTION_TYPE_INDEPENDENT, // The struct local is promoted, // and its field locals are independent of its parent struct local. PROMOTION_TYPE_DEPENDENT // The struct local is promoted, // but its field locals depend on its parent struct local. }; /*****************************************************************************/ enum FrameLayoutState { NO_FRAME_LAYOUT, INITIAL_FRAME_LAYOUT, PRE_REGALLOC_FRAME_LAYOUT, REGALLOC_FRAME_LAYOUT, TENTATIVE_FRAME_LAYOUT, FINAL_FRAME_LAYOUT }; public: RefCountState lvaRefCountState; // Current local ref count state bool lvaLocalVarRefCounted() const { return lvaRefCountState == RCS_NORMAL; } bool lvaTrackedFixed; // true: We cannot add new 'tracked' variable unsigned lvaCount; // total number of locals, which includes function arguments, // special arguments, IL local variables, and JIT temporary variables LclVarDsc* lvaTable; // variable descriptor table unsigned lvaTableCnt; // lvaTable size (>= lvaCount) unsigned lvaTrackedCount; // actual # of locals being tracked unsigned lvaTrackedCountInSizeTUnits; // min # of size_t's sufficient to hold a bit for all the locals being tracked #ifdef DEBUG VARSET_TP lvaTrackedVars; // set of tracked variables #endif #ifndef TARGET_64BIT VARSET_TP lvaLongVars; // set of long (64-bit) variables #endif VARSET_TP lvaFloatVars; // set of floating-point (32-bit and 64-bit) variables unsigned lvaCurEpoch; // VarSets are relative to a specific set of tracked var indices. // If that changes, this changes. VarSets from different epochs // cannot be meaningfully combined. unsigned GetCurLVEpoch() { return lvaCurEpoch; } // reverse map of tracked number to var number unsigned lvaTrackedToVarNumSize; unsigned* lvaTrackedToVarNum; #if DOUBLE_ALIGN #ifdef DEBUG // # of procs compiled with a double-aligned stack static unsigned s_lvaDoubleAlignedProcsCount; #endif #endif // Getters and setters for address-exposed and do-not-enregister local var properties.
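// (Editorial illustration, not part of the original header: a phase that takes
// the address of local `lclNum` would typically record that fact as
//     lvaSetVarAddrExposed(lclNum DEBUGARG(reason));
// where `reason` is some AddressExposedReason value; once a local is exposed it
// is also marked do-not-enregister and must live on the stack frame.)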
bool lvaVarAddrExposed(unsigned varNum) const; void lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason)); void lvaSetVarLiveInOutOfHandler(unsigned varNum); bool lvaVarDoNotEnregister(unsigned varNum); void lvSetMinOptsDoNotEnreg(); bool lvaEnregEHVars; bool lvaEnregMultiRegVars; void lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)); unsigned lvaVarargsHandleArg; #ifdef TARGET_X86 unsigned lvaVarargsBaseOfStkArgs; // Pointer (computed based on incoming varargs handle) to the start of the stack // arguments #endif // TARGET_X86 unsigned lvaInlinedPInvokeFrameVar; // variable representing the InlinedCallFrame unsigned lvaReversePInvokeFrameVar; // variable representing the reverse PInvoke frame #if FEATURE_FIXED_OUT_ARGS unsigned lvaPInvokeFrameRegSaveVar; // variable representing the RegSave for PInvoke inlining. #endif unsigned lvaMonAcquired; // boolean variable introduced in synchronized methods // that tracks whether the lock has been taken unsigned lvaArg0Var; // The lclNum of arg0. Normally this will be info.compThisArg. // However, if there is a "ldarga 0" or "starg 0" in the IL, // we will redirect all "ldarg(a) 0" and "starg 0" to this temp. unsigned lvaInlineeReturnSpillTemp; // The temp to spill the non-VOID return expression // in case there are multiple BBJ_RETURN blocks in the inlinee // or if the inlinee has GC ref locals. #if FEATURE_FIXED_OUT_ARGS unsigned lvaOutgoingArgSpaceVar; // dummy TYP_LCLBLK var for fixed outgoing argument space PhasedVar<unsigned> lvaOutgoingArgSpaceSize; // size of fixed outgoing argument space #endif // FEATURE_FIXED_OUT_ARGS static unsigned GetOutgoingArgByteSize(unsigned sizeWithoutPadding) { return roundUp(sizeWithoutPadding, TARGET_POINTER_SIZE); } // Variable representing the return address. The helper-based tailcall // mechanism passes the address of the return address to a runtime helper // where it is used to detect tail-call chains. unsigned lvaRetAddrVar; #if defined(DEBUG) && defined(TARGET_XARCH) unsigned lvaReturnSpCheck; // Stores SP to confirm it is not corrupted on return. #endif // defined(DEBUG) && defined(TARGET_XARCH) #if defined(DEBUG) && defined(TARGET_X86) unsigned lvaCallSpCheck; // Stores SP to confirm it is not corrupted after every call. #endif // defined(DEBUG) && defined(TARGET_X86) bool lvaGenericsContextInUse; bool lvaKeepAliveAndReportThis(); // Synchronized instance method of a reference type, or // CORINFO_GENERICS_CTXT_FROM_THIS? bool lvaReportParamTypeArg(); // Exceptions and CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG? //------------------------------------------------------------------------- // All these frame offsets are inter-related and must be kept in sync #if !defined(FEATURE_EH_FUNCLETS) // This is used for the callable handlers unsigned lvaShadowSPslotsVar; // TYP_BLK variable for all the shadow SP slots #endif // FEATURE_EH_FUNCLETS int lvaCachedGenericContextArgOffs; int lvaCachedGenericContextArgOffset(); // For CORINFO_CALLCONV_PARAMTYPE and if generic context is passed as // THIS pointer #ifdef JIT32_GCENCODER unsigned lvaLocAllocSPvar; // variable which stores the value of ESP after the last alloca/localloc #endif // JIT32_GCENCODER unsigned lvaNewObjArrayArgs; // variable with arguments for new MD array helper // TODO-Review: Prior to reg predict we reserve 24 bytes for Spill temps.
// after the reg predict we will use a computed maxTmpSize // which is based upon the number of spill temps predicted by reg predict // All this is necessary because if we under-estimate the size of the spill // temps we could fail when encoding instructions that reference stack offsets for ARM. // // Pre codegen max spill temp size. static const unsigned MAX_SPILL_TEMP_SIZE = 24; //------------------------------------------------------------------------- unsigned lvaGetMaxSpillTempSize(); #ifdef TARGET_ARM bool lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask); #endif // TARGET_ARM void lvaAssignFrameOffsets(FrameLayoutState curState); void lvaFixVirtualFrameOffsets(); void lvaUpdateArgWithInitialReg(LclVarDsc* varDsc); void lvaUpdateArgsWithInitialReg(); void lvaAssignVirtualFrameOffsetsToArgs(); #ifdef UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs, int* callerArgOffset); #else // !UNIX_AMD64_ABI int lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs); #endif // !UNIX_AMD64_ABI void lvaAssignVirtualFrameOffsetsToLocals(); int lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs); #ifdef TARGET_AMD64 // Returns true if compCalleeRegsPushed (including RBP if used as frame pointer) is even. bool lvaIsCalleeSavedIntRegCountEven(); #endif void lvaAlignFrame(); void lvaAssignFrameOffsetsToPromotedStructs(); int lvaAllocateTemps(int stkOffs, bool mustDoubleAlign); #ifdef DEBUG void lvaDumpRegLocation(unsigned lclNum); void lvaDumpFrameLocation(unsigned lclNum); void lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth = 6); void lvaTableDump(FrameLayoutState curState = NO_FRAME_LAYOUT); // NO_FRAME_LAYOUT means use the current frame // layout state defined by lvaDoneFrameLayout #endif // Limit frames size to 1GB. The maximum is 2GB in theory - make it intentionally smaller // to avoid bugs from borderline cases. #define MAX_FrameSize 0x3FFFFFFF void lvaIncrementFrameSize(unsigned size); unsigned lvaFrameSize(FrameLayoutState curState); // Returns the caller-SP-relative offset for the SP/FP relative offset determined by FP based. int lvaToCallerSPRelativeOffset(int offs, bool isFpBased, bool forRootFrame = true) const; // Returns the caller-SP-relative offset for the local variable "varNum." int lvaGetCallerSPRelativeOffset(unsigned varNum); // Returns the SP-relative offset for the local variable "varNum". Illegal to ask this for functions with localloc. int lvaGetSPRelativeOffset(unsigned varNum); int lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased); int lvaGetInitialSPRelativeOffset(unsigned varNum); // True if this is an OSR compilation and this local is potentially // located on the original method stack frame. 
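// (Editorial note, not part of the original header: such a local keeps the
// frame offset it was given by the original (Tier0) method, so the OSR frame
// layout addresses it through the original frame rather than assigning it a
// fresh slot.)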
bool lvaIsOSRLocal(unsigned varNum); //------------------------ For splitting types ---------------------------- void lvaInitTypeRef(); void lvaInitArgs(InitVarDscInfo* varDscInfo); void lvaInitThisPtr(InitVarDscInfo* varDscInfo); void lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg); void lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs); void lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo); void lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo); void lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig); static unsigned lvaTypeRefMask(var_types type); var_types lvaGetActualType(unsigned lclNum); var_types lvaGetRealType(unsigned lclNum); //------------------------------------------------------------------------- void lvaInit(); LclVarDsc* lvaGetDesc(unsigned lclNum) { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(unsigned lclNum) const { assert(lclNum < lvaCount); return &lvaTable[lclNum]; } LclVarDsc* lvaGetDesc(const GenTreeLclVarCommon* lclVar) { return lvaGetDesc(lclVar->GetLclNum()); } unsigned lvaTrackedIndexToLclNum(unsigned trackedIndex) { assert(trackedIndex < lvaTrackedCount); unsigned lclNum = lvaTrackedToVarNum[trackedIndex]; assert(lclNum < lvaCount); return lclNum; } LclVarDsc* lvaGetDescByTrackedIndex(unsigned trackedIndex) { return lvaGetDesc(lvaTrackedIndexToLclNum(trackedIndex)); } unsigned lvaGetLclNum(const LclVarDsc* varDsc) { assert((lvaTable <= varDsc) && (varDsc < lvaTable + lvaCount)); // varDsc must point within the table assert(((char*)varDsc - (char*)lvaTable) % sizeof(LclVarDsc) == 0); // varDsc better not point in the middle of a variable unsigned varNum = (unsigned)(varDsc - lvaTable); assert(varDsc == &lvaTable[varNum]); return varNum; } unsigned lvaLclSize(unsigned varNum); unsigned lvaLclExactSize(unsigned varNum); bool lvaHaveManyLocals() const; unsigned lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)); unsigned lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)); unsigned lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)); void lvaSortByRefCount(); void lvaMarkLocalVars(); // Local variable ref-counting void lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers); void lvaMarkLocalVars(BasicBlock* block, bool isRecompute); void lvaAllocOutgoingArgSpaceVar(); // Set up lvaOutgoingArgSpaceVar VARSET_VALRET_TP lvaStmtLclMask(Statement* stmt); #ifdef DEBUG struct lvaStressLclFldArgs { Compiler* m_pCompiler; bool m_bFirstPass; }; static fgWalkPreFn lvaStressLclFldCB; void lvaStressLclFld(); void lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars); void lvaDispVarSet(VARSET_VALARG_TP set); #endif #ifdef TARGET_ARM int lvaFrameAddress(int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage); #else int lvaFrameAddress(int varNum, bool* pFPbased); #endif bool lvaIsParameter(unsigned varNum); bool lvaIsRegArgument(unsigned varNum); bool lvaIsOriginalThisArg(unsigned varNum); // Is this varNum the original this argument? bool lvaIsOriginalThisReadOnly(); // return true if there is no place in the code // that writes to arg0 // For x64 this is 3, 5, 6, 7, >8 byte structs that are passed by reference. // For ARM64, this is structs larger than 16 bytes that are passed by reference. 
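// (Editorial illustration, not part of the original header: on x64, a 6-byte
// struct argument is therefore passed by reference, while 1/2/4/8-byte structs
// travel by value in a register or stack slot; on ARM64 a 24-byte struct is
// passed by reference while a 16-byte one is passed by value in a register
// pair.)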
bool lvaIsImplicitByRefLocal(unsigned varNum) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); if (varDsc->lvIsImplicitByRef) { assert(varDsc->lvIsParam); assert(varTypeIsStruct(varDsc) || (varDsc->lvType == TYP_BYREF)); return true; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) return false; } // Returns true if this local var is a multireg struct bool lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVararg); // If the local is a TYP_STRUCT, get/set a class handle describing it CORINFO_CLASS_HANDLE lvaGetStruct(unsigned varNum); void lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo = true); void lvaSetStructUsedAsVarArg(unsigned varNum); // If the local is TYP_REF, set or update the associated class information. void lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); void lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact = false); void lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHandle = nullptr); #define MAX_NumOfFieldsInPromotableStruct 4 // Maximum number of fields in promotable struct // Info about struct type fields. struct lvaStructFieldInfo { CORINFO_FIELD_HANDLE fldHnd; unsigned char fldOffset; unsigned char fldOrdinal; var_types fldType; unsigned fldSize; CORINFO_CLASS_HANDLE fldTypeHnd; lvaStructFieldInfo() : fldHnd(nullptr), fldOffset(0), fldOrdinal(0), fldType(TYP_UNDEF), fldSize(0), fldTypeHnd(nullptr) { } }; // Info about a struct type, instances of which may be candidates for promotion. struct lvaStructPromotionInfo { CORINFO_CLASS_HANDLE typeHnd; bool canPromote; bool containsHoles; bool customLayout; bool fieldsSorted; unsigned char fieldCnt; lvaStructFieldInfo fields[MAX_NumOfFieldsInPromotableStruct]; lvaStructPromotionInfo(CORINFO_CLASS_HANDLE typeHnd = nullptr) : typeHnd(typeHnd) , canPromote(false) , containsHoles(false) , customLayout(false) , fieldsSorted(false) , fieldCnt(0) { } }; struct lvaFieldOffsetCmp { bool operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2); }; // This class is responsible for checking validity and profitability of struct promotion. // If it is both legal and profitable, then TryPromoteStructVar promotes the struct and initializes // necessary information for fgMorphStructField to use.
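// (Editorial illustration, not part of the original header: a typical call
// sequence from the local-variable phases is
//     if (structPromotionHelper->CanPromoteStructType(typeHnd))
//     {
//         structPromotionHelper->TryPromoteStructVar(lclNum);
//     }
// after which lvaGetPromotionType(lclNum) reports whether the promotion is
// independent or dependent.)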
class StructPromotionHelper { public: StructPromotionHelper(Compiler* compiler); bool CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd); bool TryPromoteStructVar(unsigned lclNum); void Clear() { structPromotionInfo.typeHnd = NO_CLASS_HANDLE; } #ifdef DEBUG void CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType); #endif // DEBUG private: bool CanPromoteStructVar(unsigned lclNum); bool ShouldPromoteStructVar(unsigned lclNum); void PromoteStructVar(unsigned lclNum); void SortStructFields(); bool CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo); lvaStructFieldInfo GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal); bool TryPromoteStructField(lvaStructFieldInfo& outerFieldInfo); private: Compiler* compiler; lvaStructPromotionInfo structPromotionInfo; #ifdef DEBUG typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, var_types> RetypedAsScalarFieldsMap; RetypedAsScalarFieldsMap retypedFieldsMap; #endif // DEBUG }; StructPromotionHelper* structPromotionHelper; unsigned lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset); lvaPromotionType lvaGetPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetPromotionType(unsigned varNum); lvaPromotionType lvaGetParentPromotionType(const LclVarDsc* varDsc); lvaPromotionType lvaGetParentPromotionType(unsigned varNum); bool lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc); bool lvaIsGCTracked(const LclVarDsc* varDsc); #if defined(FEATURE_SIMD) bool lvaMapSimd12ToSimd16(const LclVarDsc* varDsc) { assert(varDsc->lvType == TYP_SIMD12); assert(varDsc->lvExactSize == 12); #if defined(TARGET_64BIT) assert(compMacOsArm64Abi() || varDsc->lvSize() == 16); #endif // defined(TARGET_64BIT) // We make local variable SIMD12 types 16 bytes instead of just 12. // lvSize() will return 16 bytes for SIMD12, even for fields. // However, we can't do that mapping if the var is a dependently promoted struct field. // Such a field must remain its exact size within its parent struct unless it is a single // field *and* it is the only field in a struct of 16 bytes. if (varDsc->lvSize() != 16) { return false; } if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { LclVarDsc* parentVarDsc = lvaGetDesc(varDsc->lvParentLcl); return (parentVarDsc->lvFieldCnt == 1) && (parentVarDsc->lvSize() == 16); } return true; } #endif // defined(FEATURE_SIMD) unsigned lvaGSSecurityCookie; // LclVar number bool lvaTempsHaveLargerOffsetThanVars(); // Returns "true" iff local variable "lclNum" is in SSA form. bool lvaInSsa(unsigned lclNum) { assert(lclNum < lvaCount); return lvaTable[lclNum].lvInSsa; } unsigned lvaStubArgumentVar; // variable representing the secret stub argument coming in EAX #if defined(FEATURE_EH_FUNCLETS) unsigned lvaPSPSym; // variable representing the PSPSym #endif InlineInfo* impInlineInfo; // Only present for inlinees InlineStrategy* m_inlineStrategy; InlineContext* compInlineContext; // Always present // The Compiler* that is the root of the inlining tree of which "this" is a member. Compiler* impInlineRoot(); #if defined(DEBUG) || defined(INLINE_DATA) unsigned __int64 getInlineCycleCount() { return m_compCycles; } #endif // defined(DEBUG) || defined(INLINE_DATA) bool fgNoStructPromotion; // Set to TRUE to turn off struct promotion for this method. bool fgNoStructParamPromotion; // Set to TRUE to turn off struct promotion for parameters of this method.
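// Editorial illustration, not part of the original header: a hypothetical
// convenience wrapper (the name is invented here) showing how the promotion
// queries above compose for a field local of a promoted struct.
bool lvaIsIndependentlyPromotedField(unsigned varNum)
{
    LclVarDsc* varDsc = lvaGetDesc(varNum);
    // A field local exists for every promoted struct field; it is freely
    // enregisterable only when its parent is not dependently promoted.
    return varDsc->lvIsStructField && !lvaIsFieldOfDependentlyPromotedStruct(varDsc);
}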
//========================================================================= // PROTECTED //========================================================================= protected: //---------------- Local variable ref-counting ---------------------------- void lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute); bool IsDominatedByExceptionalEntry(BasicBlock* block); void SetVolatileHint(LclVarDsc* varDsc); // Keeps the mapping from SSA #'s to VN's for the implicit memory variables. SsaDefArray<SsaMemDef> lvMemoryPerSsaData; public: // Returns the address of the per-Ssa data for memory at the given ssaNum (which is required // not to be the SsaConfig::RESERVED_SSA_NUM, which indicates that the variable is // not an SSA variable). SsaMemDef* GetMemoryPerSsaData(unsigned ssaNum) { return lvMemoryPerSsaData.GetSsaDef(ssaNum); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ private: // For prefixFlags enum { PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix PREFIX_TAILCALL_IMPLICIT = 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix PREFIX_TAILCALL_STRESS = 0x00000100, // call doesn't have a "tail" IL prefix but is treated as explicit because of tail call stress PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT | PREFIX_TAILCALL_STRESS), PREFIX_VOLATILE = 0x00001000, PREFIX_UNALIGNED = 0x00010000, PREFIX_CONSTRAINED = 0x00100000, PREFIX_READONLY = 0x01000000 }; static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix); static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp); static bool impOpcodeIsCallOpcode(OPCODE opcode); public: void impInit(); void impImport(); CORINFO_CLASS_HANDLE impGetRefAnyClass(); CORINFO_CLASS_HANDLE impGetRuntimeArgumentHandle(); CORINFO_CLASS_HANDLE impGetTypeHandleClass(); CORINFO_CLASS_HANDLE impGetStringClass(); CORINFO_CLASS_HANDLE impGetObjectClass(); // Returns underlying type of handles returned by ldtoken instruction var_types GetRuntimeHandleUnderlyingType() { // RuntimeTypeHandle is backed by raw pointer on CoreRT and by object reference on other runtimes return IsTargetAbi(CORINFO_CORERT_ABI) ? TYP_I_IMPL : TYP_REF; } void impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* contextHandle, CORINFO_CONTEXT_HANDLE* exactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset = BAD_IL_OFFSET); //========================================================================= // PROTECTED //========================================================================= protected: //-------------------- Stack manipulation --------------------------------- unsigned impStkSize; // Size of the full stack #define SMALL_STACK_SIZE 16 // number of elements in impSmallStack struct SavedStack // used to save/restore stack contents.
{ unsigned ssDepth; // number of values on stack StackEntry* ssTrees; // saved tree values }; bool impIsPrimitive(CorInfoType type); bool impILConsumesAddr(const BYTE* codeAddr); void impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind); void impPushOnStack(GenTree* tree, typeInfo ti); void impPushNullObjRefOnStack(); StackEntry impPopStack(); StackEntry& impStackTop(unsigned n = 0); unsigned impStackHeight(); void impSaveStackState(SavedStack* savePtr, bool copy); void impRestoreStackState(SavedStack* savePtr); GenTree* impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); int impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation = false); void impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken); void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); bool impCanPInvokeInline(); bool impCanPInvokeInlineCallSite(BasicBlock* block); void impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block); GenTreeCall* impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di = DebugInfo()); void impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig); void impInsertHelperCall(CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); void impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall); var_types impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a // type parameter? 
GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset); CORINFO_CLASS_HANDLE impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE specialIntrinsicHandle); bool impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv); GenTree* impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd); GenTree* impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv); #ifdef DEBUG var_types impImportJitTestLabelMark(int numArgs); #endif // DEBUG GenTree* impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken); GenTree* impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp); GenTree* impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp); static void impBashVarAddrsToI(GenTree* tree1, GenTree* tree2 = nullptr); GenTree* impImplicitIorI4Cast(GenTree* tree, var_types dstTyp); GenTree* impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp); void impImportLeave(BasicBlock* block); void impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr); GenTree* impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom); // Mirrors StringComparison.cs enum StringComparison { Ordinal = 4, OrdinalIgnoreCase = 5 }; enum StringComparisonJoint { Eq, // (d1 == cns1) && (s2 == cns2) Xor, // (d1 ^ cns1) | (s2 ^ cns2) }; GenTree* impStringEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impSpanEqualsOrStartsWith(bool startsWith, CORINFO_SIG_INFO* sig, unsigned methodFlags); GenTree* impExpandHalfConstEquals(GenTreeLclVar* data, GenTree* lengthFld, bool checkForNull, bool startsWith, WCHAR* cnsData, int len, int dataOffset, StringComparison cmpMode); GenTree* impCreateCompareInd(GenTreeLclVar* obj, var_types type, ssize_t offset, ssize_t value, StringComparison ignoreCase, StringComparisonJoint joint = Eq); GenTree* impExpandHalfConstEqualsSWAR( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTree* impExpandHalfConstEqualsSIMD( GenTreeLclVar* data, WCHAR* cns, int len, int dataOffset, StringComparison cmpMode); GenTreeStrCon* impGetStrConFromSpan(GenTree* span); GenTree* impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pContstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic = nullptr); GenTree* impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall); NamedIntrinsic lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method); GenTree* impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); #ifdef FEATURE_HW_INTRINSICS GenTree* impHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand); GenTree* impSimdAsHWIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, GenTree* newobjThis); protected: bool compSupportsHWIntrinsic(CORINFO_InstructionSet isa); GenTree* impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, var_types retType, CorInfoType simdBaseJitType, 
unsigned simdSize, GenTree* newobjThis); GenTree* impSpecialIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* getArgForHWIntrinsic(var_types argType, CORINFO_CLASS_HANDLE argClass, bool expectAddr = false, GenTree* newobjThis = nullptr); GenTree* impNonConstFallback(NamedIntrinsic intrinsic, var_types simdType, CorInfoType simdBaseJitType); GenTree* addRangeCheckIfNeeded( NamedIntrinsic intrinsic, GenTree* immOp, bool mustExpand, int immLowerBound, int immUpperBound); GenTree* addRangeCheckForHWIntrinsic(GenTree* immOp, int immLowerBound, int immUpperBound); #ifdef TARGET_XARCH GenTree* impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, CorInfoType simdBaseJitType, var_types retType, unsigned simdSize); GenTree* impSSEIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impSSE2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impAvxOrAvx2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); GenTree* impBMI1OrBMI2Intrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig); #endif // TARGET_XARCH #endif // FEATURE_HW_INTRINSICS GenTree* impArrayAccessIntrinsic(CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName); GenTree* impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig); GenTree* impKeepAliveIntrinsic(GenTree* objToKeepAlive); GenTree* impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform); //----------------- Manipulating the trees and stmts ---------------------- Statement* impStmtList; // Statements for the BB being imported. Statement* impLastStmt; // The last statement for the current BB. 
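// (Editorial illustration, not part of the original header: the import
// statement list above is a simple linked list anchored at impStmtList and
// impLastStmt. A typical import step builds a tree and appends it as
//     impAppendTree(tree, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
// spilling the evaluation stack first so that side effects stay correctly
// ordered; impEndTreeList then transfers the finished list to the block.)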
public: enum { CHECK_SPILL_ALL = -1, CHECK_SPILL_NONE = -2 }; void impBeginTreeList(); void impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt); void impEndTreeList(BasicBlock* block); void impAppendStmtCheck(Statement* stmt, unsigned chkLevel); void impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo = true); void impAppendStmt(Statement* stmt); void impInsertStmtBefore(Statement* stmt, Statement* stmtBefore); Statement* impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo = true); void impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore); void impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel = (unsigned)CHECK_SPILL_NONE, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); void impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); Statement* impExtractLastStmt(); GenTree* impCloneExpr(GenTree* tree, GenTree** clone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)); GenTree* impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impAssignStructPtr(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt = nullptr, const DebugInfo& di = DebugInfo(), BasicBlock* block = nullptr); GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref); var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* simdBaseJitType = nullptr); GenTree* impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization = false); GenTree* impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false, bool importParent = false); GenTree* impParentClassTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup = nullptr, bool mustRestoreHandle = false) { return impTokenToHandle(pResolvedToken, pRuntimeLookup, mustRestoreHandle, true); } GenTree* impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTree* getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind); GenTree* impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags flags, void* compileTimeHandle); GenTreeCall* impReadyToRunHelperToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args = nullptr, CORINFO_LOOKUP_KIND* pGenericLookupKind = nullptr); bool impIsCastHelperEligibleForClassProbe(GenTree* tree); bool impIsCastHelperMayHaveProfileData(GenTree* tree); GenTree* impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset); GenTree* impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass); bool VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, 
CorInfoCallConvExtension callConv); bool IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName); bool IsTargetIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(NamedIntrinsic intrinsicName); bool IsMathIntrinsic(GenTree* tree); private: //----------------- Importing the method ---------------------------------- CORINFO_CONTEXT_HANDLE impTokenLookupContextHandle; // The context used for looking up tokens. #ifdef DEBUG unsigned impCurOpcOffs; const char* impCurOpcName; bool impNestedStackSpill; // For displaying instrs with generated native code (-n:B) Statement* impLastILoffsStmt; // oldest stmt added for which we did not call SetLastILOffset(). void impNoteLastILoffs(); #endif // Debug info of current statement being imported. It gets set to contain // no IL location (!impCurStmtDI.GetLocation().IsValid) after it has been // set in the appended trees. Then it gets updated at IL instructions for // which we have to report mapping info. // It will always contain the current inline context. DebugInfo impCurStmtDI; DebugInfo impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall); void impCurStmtOffsSet(IL_OFFSET offs); void impNoteBranchOffs(); unsigned impInitBlockLineInfo(); bool impIsThis(GenTree* obj); bool impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr); bool impIsAnySTLOC(OPCODE opcode) { return ((opcode == CEE_STLOC) || (opcode == CEE_STLOC_S) || ((opcode >= CEE_STLOC_0) && (opcode <= CEE_STLOC_3))); } GenTreeCall::Use* impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs = nullptr); bool impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const; GenTreeCall::Use* impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount = 0); //---------------- Spilling the importer stack ---------------------------- // The maximum number of bytes of IL processed without clean stack state. // It allows us to limit the maximum tree size and depth. static const unsigned MAX_TREE_SIZE = 200; bool impCanSpillNow(OPCODE prevOpcode); struct PendingDsc { PendingDsc* pdNext; BasicBlock* pdBB; SavedStack pdSavedStack; ThisInitState pdThisPtrInit; }; PendingDsc* impPendingList; // list of BBs currently waiting to be imported. PendingDsc* impPendingFree; // Freed up dscs that can be reused // We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation. JitExpandArray<BYTE> impPendingBlockMembers; // Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor. BYTE impGetPendingBlockMember(BasicBlock* blk) { return impInlineRoot()->impPendingBlockMembers.Get(blk->bbInd()); } // Set the byte for "b" to "val" (allocating/extending impPendingBlockMembers if necessary.) // Operates on the map in the top-level ancestor.
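// (Editorial illustration, not part of the original header: the byte map acts
// as a membership flag for the to-do list — a block is appended to
// impPendingList only when impGetPendingBlockMember(blk) reports it absent,
// and the byte is reset once the block is popped and imported, so each block
// sits on the pending list at most once at a time.)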
void impSetPendingBlockMember(BasicBlock* blk, BYTE val) { impInlineRoot()->impPendingBlockMembers.Set(blk->bbInd(), val); } bool impCanReimport; bool impSpillStackEntry(unsigned level, unsigned varNum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ); void impSpillStackEnsure(bool spillLeaves = false); void impEvalSideEffects(); void impSpillSpecialSideEff(); void impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)); void impSpillValueClasses(); void impSpillEvalStack(); static fgWalkPreFn impFindValueClasses; void impSpillLclRefs(ssize_t lclNum); BasicBlock* impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter); bool impBlockIsInALoop(BasicBlock* block); void impImportBlockCode(BasicBlock* block); void impReimportMarkBlock(BasicBlock* block); void impReimportMarkSuccessors(BasicBlock* block); void impVerifyEHBlock(BasicBlock* block, bool isTryStart); void impImportBlockPending(BasicBlock* block); // Similar to impImportBlockPending, but assumes that block has already been imported once and is being // reimported for some reason. It specifically does *not* look at verCurrentState to set the EntryState // for the block, but instead, just re-uses the block's existing EntryState. void impReimportBlockPending(BasicBlock* block); var_types impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2); void impImportBlock(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We will assign the values // on the stack to local variables (the "spill temp" variables). The successor blocks will assume that // its incoming stack contents are in those locals. This requires "block" and its successors to agree on // the variables that will be used -- and for all the predecessors of those successors, and the // successors of those predecessors, etc. Call such a set of blocks closed under alternating // successor/predecessor edges a "spill clique." A block is a "predecessor" or "successor" member of the // clique (or, conceivably, both). Each block has a specified sequence of incoming and outgoing spill // temps. If "block" already has its outgoing spill temps assigned (they are always a contiguous series // of local variable numbers, so we represent them with the base local variable number), returns that. // Otherwise, picks a set of spill temps, and propagates this choice to all blocks in the spill clique of // which "block" is a member (asserting, in debug mode, that no block in this clique had its spill temps // chosen already. More precisely, that the incoming or outgoing spill temps are not chosen, depending // on which kind of member of the clique the block is). unsigned impGetSpillTmpBase(BasicBlock* block); // Assumes that "block" is a basic block that completes with a non-empty stack. We have previously // assigned the values on the stack to local variables (the "spill temp" variables). The successor blocks // will assume that its incoming stack contents are in those locals. This requires "block" and its // successors to agree on the variables and their types that will be used. The CLI spec allows implicit // conversions between 'int' and 'native int' or 'float' and 'double' stack types. So one predecessor can // push an int and another can push a native int. 
For 64-bit we have chosen to implement this by typing // the "spill temp" as native int, and then importing (or re-importing as needed) so that all the // predecessors in the "spill clique" push a native int (sign-extending if needed), and all the // successors receive a native int. Similarly float and double are unified to double. // This routine is called after a type-mismatch is detected, and it will walk the spill clique to mark // blocks for re-importation as appropriate (both successors, so they get the right incoming type, and // predecessors, so they insert an upcast if needed). void impReimportSpillClique(BasicBlock* block); // When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic // block, and represent the predecessor and successor members of the clique currently being computed. // *** Access to these will need to be locked in a parallel compiler. JitExpandArray<BYTE> impSpillCliquePredMembers; JitExpandArray<BYTE> impSpillCliqueSuccMembers; enum SpillCliqueDir { SpillCliquePred, SpillCliqueSucc }; // Abstract class for receiving a callback while walking a spill clique class SpillCliqueWalker { public: virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) = 0; }; // This class is used for setting the bbStkTempsIn and bbStkTempsOut on the blocks within a spill clique class SetSpillTempsBase : public SpillCliqueWalker { unsigned m_baseTmp; public: SetSpillTempsBase(unsigned baseTmp) : m_baseTmp(baseTmp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This class is used for implementing impReimportSpillClique part on each block within the spill clique class ReimportSpillClique : public SpillCliqueWalker { Compiler* m_pComp; public: ReimportSpillClique(Compiler* pComp) : m_pComp(pComp) { } virtual void Visit(SpillCliqueDir predOrSucc, BasicBlock* blk); }; // This is the heart of the algorithm for walking spill cliques. It invokes callback->Visit for each // predecessor or successor within the spill clique void impWalkSpillCliqueFromPred(BasicBlock* pred, SpillCliqueWalker* callback); // For a BasicBlock that has already been imported, the EntryState has an array of GenTrees for the // incoming locals. This walks that list and resets the types of the GenTrees to match the types of // the VarDscs. They get out of sync when we have int/native int issues (see impReimportSpillClique). void impRetypeEntryStateTemps(BasicBlock* blk); BYTE impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk); void impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val); void impPushVar(GenTree* op, typeInfo tiRetVal); GenTreeLclVar* impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)); void impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal); void impLoadVar(unsigned lclNum, IL_OFFSET offset) { impLoadVar(lclNum, offset, lvaGetDesc(lclNum)->lvVerTypeInfo); } void impLoadArg(unsigned ilArgNum, IL_OFFSET offset); void impLoadLoc(unsigned ilLclNum, IL_OFFSET offset); bool impReturnInstruction(int prefixFlags, OPCODE& opcode); #ifdef TARGET_ARM void impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* op, CORINFO_CLASS_HANDLE hClass); #endif // A free list of linked list nodes used to represent to-do stacks of basic blocks.
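// (Editorial illustration, not part of the original header: a walk typically
// pushes with `list = new (this) BlockListNode(blk, list)` — the placement
// operator new below allocates from the compiler — and pops by advancing to
// m_next, returning the spent node through FreeBlockListNode for reuse.)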
struct BlockListNode { BasicBlock* m_blk; BlockListNode* m_next; BlockListNode(BasicBlock* blk, BlockListNode* next = nullptr) : m_blk(blk), m_next(next) { } void* operator new(size_t sz, Compiler* comp); }; BlockListNode* impBlockListNodeFreeList; void FreeBlockListNode(BlockListNode* node); bool impIsValueType(typeInfo* pTypeInfo); var_types mangleVarArgsType(var_types type); regNumber getCallArgIntRegister(regNumber floatReg); regNumber getCallArgFloatRegister(regNumber intReg); #if defined(DEBUG) static unsigned jitTotalMethodCompiled; #endif #ifdef DEBUG static LONG jitNestingLevel; #endif // DEBUG static bool impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut = nullptr); void impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult); // STATIC inlining decision based on the IL code. void impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult); void impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); void impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); unsigned impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)); GenTree* impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclTypeInfo); bool impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo); bool impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo); void impMarkInlineCandidate(GenTree* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); void impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo); bool impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv); bool impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive); bool impIsImplicitTailCallCandidate( OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive); bool impIsClassExact(CORINFO_CLASS_HANDLE classHnd); bool impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array); CORINFO_RESOLVED_TOKEN* impAllocateToken(const CORINFO_RESOLVED_TOKEN& token); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX XX XX Info about the basic-blocks, their contents and the flow analysis XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: BasicBlock* fgFirstBB; // Beginning of the basic block list BasicBlock* fgLastBB; // End of the basic block list BasicBlock* fgFirstColdBlock; // First block to be placed in the cold section BasicBlock* fgEntryBB; // For OSR, the original method's entry point BasicBlock* fgOSREntryBB; 
// For OSR, the logical entry point (~ patchpoint) #if defined(FEATURE_EH_FUNCLETS) BasicBlock* fgFirstFuncletBB; // First block of outlined funclets (to allow block insertion before the funclets) #endif BasicBlock* fgFirstBBScratch; // Block inserted for initialization stuff. Is nullptr if no such block has been // created. BasicBlockList* fgReturnBlocks; // list of BBJ_RETURN blocks unsigned fgEdgeCount; // # of control flow edges between the BBs unsigned fgBBcount; // # of BBs in the method #ifdef DEBUG unsigned fgBBcountAtCodegen; // # of BBs in the method at the start of codegen #endif unsigned fgBBNumMax; // The max bbNum that has been assigned to basic blocks unsigned fgDomBBcount; // # of BBs for which we have dominator and reachability information BasicBlock** fgBBInvPostOrder; // The flow graph stored in an array sorted in topological order, needed to compute // dominance. Indexed by block number. Size: fgBBNumMax + 1. // After the dominance tree is computed, we cache a DFS preorder number and DFS postorder number to compute // dominance queries in O(1). fgDomTreePreOrder and fgDomTreePostOrder are arrays giving the block's preorder and // postorder number, respectively. The arrays are indexed by basic block number. (Note that blocks are numbered // starting from one. Thus, we always waste element zero. This makes debugging easier and makes the code less likely // to suffer from bugs stemming from forgetting to add or subtract one from the block number to form an array // index). The arrays are of size fgBBNumMax + 1. unsigned* fgDomTreePreOrder; unsigned* fgDomTreePostOrder; // Dominator tree used by SSA construction and copy propagation (the two are expected to use the same tree // in order to avoid the need for SSA reconstruction and an "out of SSA" phase). DomTreeNode* fgSsaDomTree; bool fgBBVarSetsInited; // Allocate array like T* a = new T[fgBBNumMax + 1]; // Using helper so we don't keep forgetting +1. template <typename T> T* fgAllocateTypeForEachBlk(CompMemKind cmk = CMK_Unknown) { return getAllocator(cmk).allocate<T>(fgBBNumMax + 1); } // BlockSets are relative to a specific set of BasicBlock numbers. If that changes // (if the blocks are renumbered), this changes. BlockSets from different epochs // cannot be meaningfully combined. Note that new blocks can be created with higher // block numbers without changing the basic block epoch. These blocks *cannot* // participate in a block set until the blocks are all renumbered, causing the epoch // to change. This is useful if continuing to use previous block sets is valuable. // If the epoch is zero, then it is uninitialized, and block sets can't be used. unsigned fgCurBBEpoch; unsigned GetCurBasicBlockEpoch() { return fgCurBBEpoch; } // The number of basic blocks in the current epoch. When the blocks are renumbered, // this is fgBBcount. As blocks are added, fgBBcount increases, fgCurBBEpochSize remains // the same, until a new BasicBlock epoch is created, such as when the blocks are all renumbered. unsigned fgCurBBEpochSize; // The number of "size_t" elements required to hold a bitset large enough for fgCurBBEpochSize // bits. This is precomputed to avoid doing math every time BasicBlockBitSetTraits::GetArrSize() is called. unsigned fgBBSetCountInSizeTUnits; void NewBasicBlockEpoch() { INDEBUG(unsigned oldEpochArrSize = fgBBSetCountInSizeTUnits); // We have a new epoch. Compute and cache the size needed for new BlockSets. 
fgCurBBEpoch++; fgCurBBEpochSize = fgBBNumMax + 1; fgBBSetCountInSizeTUnits = roundUp(fgCurBBEpochSize, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG // All BlockSet objects are now invalid! fgReachabilitySetsValid = false; // the bbReach sets are now invalid! fgEnterBlksSetValid = false; // the fgEnterBlks set is now invalid! if (verbose) { unsigned epochArrSize = BasicBlockBitSetTraits::GetArrSize(this, sizeof(size_t)); printf("\nNew BlockSet epoch %d, # of blocks (including unused BB00): %u, bitset array size: %u (%s)", fgCurBBEpoch, fgCurBBEpochSize, epochArrSize, (epochArrSize <= 1) ? "short" : "long"); if ((fgCurBBEpoch != 1) && ((oldEpochArrSize <= 1) != (epochArrSize <= 1))) { // If we're not just establishing the first epoch, and the epoch array size has changed such that we're // going to change our bitset representation from short (just a size_t bitset) to long (a pointer to an // array of size_t bitsets), then print that out. printf("; NOTE: BlockSet size was previously %s!", (oldEpochArrSize <= 1) ? "short" : "long"); } printf("\n"); } #endif // DEBUG } void EnsureBasicBlockEpoch() { if (fgCurBBEpochSize != fgBBNumMax + 1) { NewBasicBlockEpoch(); } } BasicBlock* fgNewBasicBlock(BBjumpKinds jumpKind); void fgEnsureFirstBBisScratch(); bool fgFirstBBisScratch(); bool fgBBisScratch(BasicBlock* block); void fgExtendEHRegionBefore(BasicBlock* block); void fgExtendEHRegionAfter(BasicBlock* block); BasicBlock* fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter = false, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely = false, bool insertAtEnd = false); BasicBlock* fgNewBBinRegion(BBjumpKinds jumpKind); BasicBlock* fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned xcptnIndex, bool putInTryRegion); void fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk); void fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk); void fgUnlinkBlock(BasicBlock* block); #ifdef FEATURE_JIT_METHOD_PERF unsigned fgMeasureIR(); #endif // FEATURE_JIT_METHOD_PERF bool fgModified; // True if the flow graph has been modified recently bool fgComputePredsDone; // Have we computed the bbPreds list bool fgCheapPredsValid; // Is the bbCheapPreds list valid? bool fgDomsComputed; // Have we computed the dominator sets? bool fgReturnBlocksComputed; // Have we computed the return blocks list? bool fgOptimizedFinally; // Did we optimize any try-finallys? bool fgHasSwitch; // any BBJ_SWITCH jumps? BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler // begin blocks. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should // never be removed due to a requirement to use the BBJ_ALWAYS for generating code and // not have "retless" blocks. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG bool fgReachabilitySetsValid; // Are the bbReach sets valid? bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid? 
#endif // DEBUG bool fgRemoveRestOfBlock; // true if we know that we will throw bool fgStmtRemoved; // true if we remove statements -> need new DFA // There are two modes for ordering of the trees. // - In FGOrderTree, the dominant ordering is the tree order, and the nodes contained in // each tree and sub-tree are contiguous, and can be traversed (in gtNext/gtPrev order) // by traversing the tree according to the order of the operands. // - In FGOrderLinear, the dominant ordering is the linear order. enum FlowGraphOrder { FGOrderTree, FGOrderLinear }; FlowGraphOrder fgOrder; // The following are boolean flags that keep track of the state of internal data structures bool fgStmtListThreaded; // true if the node list is now threaded bool fgCanRelocateEHRegions; // true if we are allowed to relocate the EH regions bool fgEdgeWeightsComputed; // true after we have called fgComputeEdgeWeights bool fgHaveValidEdgeWeights; // true if we were successful in computing all of the edge weights bool fgSlopUsedInEdgeWeights; // true if there was some slop used when computing the edge weights bool fgRangeUsedInEdgeWeights; // true if some of the edge weights are expressed in Min..Max form bool fgNeedsUpdateFlowGraph; // true if we need to run fgUpdateFlowGraph weight_t fgCalledCount; // count of the number of times this method was called // This is derived from the profile data // or is BB_UNITY_WEIGHT when we don't have profile data #if defined(FEATURE_EH_FUNCLETS) bool fgFuncletsCreated; // true if the funclet creation phase has been run #endif // FEATURE_EH_FUNCLETS bool fgGlobalMorph; // indicates if we are in the global morphing phase, // since fgMorphTree can be called from several places bool impBoxTempInUse; // the temp below is valid and available unsigned impBoxTemp; // a temporary that is used for boxing #ifdef DEBUG bool jitFallbackCompile; // Are we doing a fallback compile? That is, have we executed a NO_WAY assert, // and we are trying to compile again in a "safer", minopts mode? #endif #if defined(DEBUG) unsigned impInlinedCodeSize; bool fgPrintInlinedMethods; #endif jitstd::vector<flowList*>* fgPredListSortVector; //------------------------------------------------------------------------- void fgInit(); PhaseStatus fgImport(); PhaseStatus fgTransformIndirectCalls(); PhaseStatus fgTransformPatchpoints(); PhaseStatus fgInline(); PhaseStatus fgRemoveEmptyTry(); PhaseStatus fgRemoveEmptyFinally(); PhaseStatus fgMergeFinallyChains(); PhaseStatus fgCloneFinally(); void fgCleanupContinuation(BasicBlock* continuation); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgUpdateFinallyTargetFlags(); void fgClearAllFinallyTargetBits(); void fgAddFinallyTargetFlags(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) PhaseStatus fgTailMergeThrows(); void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); void fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, BasicBlock* nonCanonicalBlock, BasicBlock* canonicalBlock, flowList* predEdge); GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary.
bool fgNeedToAddFinallyTargetBits; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) bool fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, BasicBlock* handler, BlockToBlockMap& continuationMap); GenTree* fgGetCritSectOfStaticMethod(); #if defined(FEATURE_EH_FUNCLETS) void fgAddSyncMethodEnterExit(); GenTree* fgCreateMonitorTree(unsigned lvaMonitorBool, unsigned lvaThisVar, BasicBlock* block, bool enter); void fgConvertSyncReturnToLeave(BasicBlock* block); #endif // FEATURE_EH_FUNCLETS void fgAddReversePInvokeEnterExit(); bool fgMoreThanOneReturnBlock(); // The number of separate return points in the method. unsigned fgReturnCount; void fgAddInternal(); enum class FoldResult { FOLD_DID_NOTHING, FOLD_CHANGED_CONTROL_FLOW, FOLD_REMOVED_LAST_STMT, FOLD_ALTERED_LAST_STMT, }; FoldResult fgFoldConditional(BasicBlock* block); void fgMorphStmts(BasicBlock* block); void fgMorphBlocks(); void fgMergeBlockReturn(BasicBlock* block); bool fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)); void fgSetOptions(); #ifdef DEBUG static fgWalkPreFn fgAssertNoQmark; void fgPreExpandQmarkChecks(GenTree* expr); void fgPostExpandQmarkChecks(); static void fgCheckQmarkAllowedForm(GenTree* tree); #endif IL_OFFSET fgFindBlockILOffset(BasicBlock* block); void fgFixEntryFlowForOSR(); BasicBlock* fgSplitBlockAtBeginning(BasicBlock* curr); BasicBlock* fgSplitBlockAtEnd(BasicBlock* curr); BasicBlock* fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt); BasicBlock* fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node); // for LIR BasicBlock* fgSplitEdge(BasicBlock* curr, BasicBlock* succ); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block, const DebugInfo& di); Statement* fgNewStmtFromTree(GenTree* tree); Statement* fgNewStmtFromTree(GenTree* tree, BasicBlock* block); Statement* fgNewStmtFromTree(GenTree* tree, const DebugInfo& di); GenTree* fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst = nullptr); void fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt); void fgExpandQmarkStmt(BasicBlock* block, Statement* stmt); void fgExpandQmarkNodes(); // Do "simple lowering." This functionality is (conceptually) part of "general" // lowering that is distributed between fgMorph and the lowering phase of LSRA. 
void fgSimpleLowering(); GenTree* fgInitThisClass(); GenTreeCall* fgGetStaticsCCtorHelper(CORINFO_CLASS_HANDLE cls, CorInfoHelpFunc helper); GenTreeCall* fgGetSharedCCtor(CORINFO_CLASS_HANDLE cls); bool backendRequiresLocalVarLifetimes() { return !opts.MinOpts() || m_pLinearScan->willEnregisterLocalVars(); } void fgLocalVarLiveness(); void fgLocalVarLivenessInit(); void fgPerNodeLocalVarLiveness(GenTree* node); void fgPerBlockLocalVarLiveness(); VARSET_VALRET_TP fgGetHandlerLiveVars(BasicBlock* block); void fgLiveVarAnalysis(bool updateInternalOnly = false); void fgComputeLifeCall(VARSET_TP& life, GenTreeCall* call); void fgComputeLifeTrackedLocalUse(VARSET_TP& life, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeTrackedLocalDef(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* node); bool fgComputeLifeUntrackedLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, LclVarDsc& varDsc, GenTreeLclVarCommon* lclVarNode); bool fgComputeLifeLocal(VARSET_TP& life, VARSET_VALARG_TP keepAliveVars, GenTree* lclVarNode); void fgComputeLife(VARSET_TP& life, GenTree* startNode, GenTree* endNode, VARSET_VALARG_TP volatileVars, bool* pStmtInfoDirty DEBUGARG(bool* treeModf)); void fgComputeLifeLIR(VARSET_TP& life, BasicBlock* block, VARSET_VALARG_TP volatileVars); bool fgTryRemoveNonLocal(GenTree* node, LIR::Range* blockRange); void fgRemoveDeadStoreLIR(GenTree* store, BasicBlock* block); bool fgRemoveDeadStore(GenTree** pTree, LclVarDsc* varDsc, VARSET_VALARG_TP life, bool* doAgain, bool* pStmtInfoDirty, bool* pStoreRemoved DEBUGARG(bool* treeModf)); void fgInterBlockLocalVarLiveness(); // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.: // 1. for (BasicBlock* const block : compiler->Blocks()) ... // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ... // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ... // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3), // both `startBlock` and `endBlock` must be non-null. // BasicBlockSimpleList Blocks() const { return BasicBlockSimpleList(fgFirstBB); } BasicBlockSimpleList Blocks(BasicBlock* startBlock) const { return BasicBlockSimpleList(startBlock); } BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const { return BasicBlockRangeList(startBlock, endBlock); } // The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name // of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose // whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us // to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree. typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap; NodeToUnsignedMap* m_opAsgnVarDefSsaNums; NodeToUnsignedMap* GetOpAsgnVarDefSsaNums() { if (m_opAsgnVarDefSsaNums == nullptr) { m_opAsgnVarDefSsaNums = new (getAllocator()) NodeToUnsignedMap(getAllocator()); } return m_opAsgnVarDefSsaNums; } // This map tracks nodes whose value numbers explicitly or implicitly depend on memory states. // The map provides the entry block of the most closely enclosing loop that // defines the memory region accessed when defining the node's VN.
// // This information should be consulted when considering hoisting a node out of a loop, as the VN // for the node will only be valid within the indicated loop. // // It is not fine-grained enough to track memory dependence within loops, so cannot be used // for more general code motion. // // If a node does not have an entry in the map we currently assume the VN is not memory dependent // and so memory does not constrain hoisting. // typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, BasicBlock*> NodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* m_nodeToLoopMemoryBlockMap; NodeToLoopMemoryBlockMap* GetNodeToLoopMemoryBlockMap() { if (m_nodeToLoopMemoryBlockMap == nullptr) { m_nodeToLoopMemoryBlockMap = new (getAllocator()) NodeToLoopMemoryBlockMap(getAllocator()); } return m_nodeToLoopMemoryBlockMap; } void optRecordLoopMemoryDependence(GenTree* tree, BasicBlock* block, ValueNum memoryVN); void optCopyLoopMemoryDependence(GenTree* fromTree, GenTree* toTree); // Requires value numbering phase to have completed. Returns the value number ("gtVN") of the // "tree," EXCEPT in the case of GTF_VAR_USEASG, because the tree node's gtVN member is the // "use" VN. Performs a lookup into the map of (use asg tree -> def VN) to return the "def's" // VN. inline ValueNum GetUseAsgDefVNOrTreeVN(GenTree* tree); // Requires that "lcl" has the GTF_VAR_DEF flag set. Returns the SSA number of "lcl". // Except: assumes that lcl is a def, and if it is // a partial def (GTF_VAR_USEASG), looks up and returns the SSA number for the "def", // rather than the "use" SSA number recorded in the tree "lcl". inline unsigned GetSsaNumForLocalVarDef(GenTree* lcl); inline bool PreciseRefCountsRequired(); // Performs SSA conversion. void fgSsaBuild(); // Reset any data structures to the state expected by "fgSsaBuild", so it can be run again. void fgResetForSsa(); unsigned fgSsaPassesCompleted; // Number of times fgSsaBuild has been run. // Returns "true" if this is a special variable that is never zero initialized in the prolog. inline bool fgVarIsNeverZeroInitializedInProlog(unsigned varNum); // Returns "true" if the variable needs explicit zero initialization. inline bool fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn); // The value numbers for this compilation. ValueNumStore* vnStore; public: ValueNumStore* GetValueNumStore() { return vnStore; } // Do value numbering (assign a value number to each // tree node). void fgValueNumber(); // Computes new GcHeap VN via the assignment H[elemTypeEq][arrVN][inx][fldSeq] = rhsVN. // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // The 'indType' is the indirection type of the lhs of the assignment and will typically // match the element type of the array or fldSeq. When this type doesn't match // or if the fldSeq is 'NotAField' we invalidate the array contents H[elemTypeEq][arrVN] // ValueNum fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, FieldSeqNode* fldSeq, ValueNum rhsVN, var_types indType); // Requires that "tree" is a GT_IND marked as an array index, and that its address argument // has been parsed to yield the other input arguments. If evaluation of the address // can raise exceptions, those should be captured in the exception set "addrXvnp". // Assumes that "elemTypeEq" is the (equivalence class rep) of the array element type. // Marks "tree" with the VN for H[elemTypeEq][arrVN][inx][fldSeq] (for the liberal VN; a new unique // VN for the conservative VN.)
Also marks the tree's argument as the address of an array element. // The type tree->TypeGet() will typically match the element type of the array or fldSeq. // When this type doesn't match or if the fldSeq is 'NotAField' we return a new unique VN // ValueNum fgValueNumberArrIndexVal(GenTree* tree, CORINFO_CLASS_HANDLE elemTypeEq, ValueNum arrVN, ValueNum inxVN, ValueNumPair addrXvnp, FieldSeqNode* fldSeq); // Requires "funcApp" to be a VNF_PtrToArrElem, and "addrXvnp" to represent the exception set thrown // by evaluating the array index expression "tree". Returns the value number resulting from // dereferencing the array in the current GcHeap state. If "tree" is non-null, it must be the // "GT_IND" that does the dereference, and it is given the returned value number. ValueNum fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* funcApp, ValueNumPair addrXvnp); // Compute the value number for a byref-exposed load of the given type via the given pointerVN. ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN); unsigned fgVNPassesCompleted; // Number of times fgValueNumber has been run. // Utility functions for fgValueNumber. // Perform value-numbering for the trees in "blk". void fgValueNumberBlock(BasicBlock* blk); // Requires that "entryBlock" is the entry block of loop "loopNum", and that "loopNum" is the // innermost loop of which "entryBlock" is the entry. Returns the value number that should be // assumed for the memoryKind at the start "entryBlk". ValueNum fgMemoryVNForLoopSideEffects(MemoryKind memoryKind, BasicBlock* entryBlock, unsigned loopNum); // Called when an operation (performed by "tree", described by "msg") may cause the GcHeap to be mutated. // As GcHeap is a subset of ByrefExposed, this will also annotate the ByrefExposed mutation. void fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg)); // Called when an operation (performed by "tree", described by "msg") may cause an address-exposed local to be // mutated. void fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg)); // For a GC heap store at curTree, record the new curMemoryVN's and update curTree's MemorySsaMap. // As GcHeap is a subset of ByrefExposed, this will also record the ByrefExposed store. void recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg)); // For a store to an address-exposed local at curTree, record the new curMemoryVN and update curTree's MemorySsaMap. void recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg)); void fgSetCurrentMemoryVN(MemoryKind memoryKind, ValueNum newMemoryVN); // Tree caused an update in the current memory VN. If "tree" has an associated heap SSA #, record that // value in that SSA #. void fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree); // The input 'tree' is a leaf node that is a constant // Assign the proper value number to the tree void fgValueNumberTreeConst(GenTree* tree); // If the VN store has been initialized, reassign the // proper value number to the constant tree. void fgUpdateConstTreeValueNumber(GenTree* tree); // Assumes that all inputs to "tree" have had value numbers assigned; assigns a VN to tree. // (With some exceptions: the VN of the lhs of an assignment is assigned as part of the // assignment.) void fgValueNumberTree(GenTree* tree); void fgValueNumberAssignment(GenTreeOp* tree); // Does value-numbering for a block assignment. 
void fgValueNumberBlockAssignment(GenTree* tree); bool fgValueNumberBlockAssignmentTypeCheck(LclVarDsc* dstVarDsc, FieldSeqNode* dstFldSeq, GenTree* src); // Does value-numbering for a cast tree. void fgValueNumberCastTree(GenTree* tree); // Does value-numbering for an intrinsic tree. void fgValueNumberIntrinsic(GenTree* tree); #ifdef FEATURE_SIMD // Does value-numbering for a GT_SIMD tree void fgValueNumberSimd(GenTreeSIMD* tree); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS // Does value-numbering for a GT_HWINTRINSIC tree void fgValueNumberHWIntrinsic(GenTreeHWIntrinsic* tree); #endif // FEATURE_HW_INTRINSICS // Does value-numbering for a call. We interpret some helper calls. void fgValueNumberCall(GenTreeCall* call); // Does value-numbering for a helper representing a cast operation. void fgValueNumberCastHelper(GenTreeCall* call); // Does value-numbering for a helper "call" that has a VN function symbol "vnf". void fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc); // Requires "helpCall" to be a helper call. Assigns it a value number; // we understand the semantics of some of the calls. Returns "true" if // the call may modify the heap (we assume arbitrary memory side effects if so). bool fgValueNumberHelperCall(GenTreeCall* helpCall); // Requires that "helpFunc" is one of the pure Jit Helper methods. // Returns the corresponding VNFunc to use for value numbering VNFunc fgValueNumberJitHelperMethodVNFunc(CorInfoHelpFunc helpFunc); // Adds the exception set for the current tree node which has a memory indirection operation void fgValueNumberAddExceptionSetForIndirection(GenTree* tree, GenTree* baseAddr); // Adds the exception sets for the current tree node which is performing a division or modulus operation void fgValueNumberAddExceptionSetForDivision(GenTree* tree); // Adds the exception set for the current tree node which is performing an overflow checking operation void fgValueNumberAddExceptionSetForOverflow(GenTree* tree); // Adds the exception set for the current tree node which is performing a bounds check operation void fgValueNumberAddExceptionSetForBoundsCheck(GenTree* tree); // Adds the exception set for the current tree node which is performing a ckfinite operation void fgValueNumberAddExceptionSetForCkFinite(GenTree* tree); // Adds the exception sets for the current tree node void fgValueNumberAddExceptionSet(GenTree* tree); #ifdef DEBUG void fgDebugCheckExceptionSets(); void fgDebugCheckValueNumberedTree(GenTree* tree); #endif // These are the current value numbers for the implicit memory variables while // doing value numbering. These are the value numbers under the "liberal" interpretation // of memory values; the "conservative" interpretation needs no VN, since every access of // memory yields an unknown value. ValueNum fgCurMemoryVN[MemoryKindCount]; // Return a "pseudo"-class handle for an array element type. If "elemType" is TYP_STRUCT, // requires "elemStructType" to be non-null (and to have a zero low-order bit). Otherwise, the low-order bit // is 1, and the rest is an encoding of "elemTyp". static CORINFO_CLASS_HANDLE EncodeElemType(var_types elemTyp, CORINFO_CLASS_HANDLE elemStructType) { if (elemStructType != nullptr) { assert(varTypeIsStruct(elemTyp) || elemTyp == TYP_REF || elemTyp == TYP_BYREF || varTypeIsIntegral(elemTyp)); assert((size_t(elemStructType) & 0x1) == 0x0); // Make sure the encoding below is valid.
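// For illustration: a non-null struct handle is returned unchanged, and the assert above guarantees its low bit is zero; a primitive such as TYP_INT is instead encoded below as ((size_t)TYP_INT << 1) | 0x1, so DecodeElemType can use the low bit to distinguish the two encodings.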
return elemStructType; } else { assert(elemTyp != TYP_STRUCT); elemTyp = varTypeToSigned(elemTyp); return CORINFO_CLASS_HANDLE(size_t(elemTyp) << 1 | 0x1); } } // If "clsHnd" is the result of an "EncodeElemType" call for a primitive type, returns the // var_types it represents. Otherwise, returns TYP_STRUCT (on the assumption that "clsHnd" is // the struct type of the element). static var_types DecodeElemType(CORINFO_CLASS_HANDLE clsHnd) { size_t clsHndVal = size_t(clsHnd); if (clsHndVal & 0x1) { return var_types(clsHndVal >> 1); } else { return TYP_STRUCT; } } // Convert a BYTE which represents the VM's CorInfoGCtype to the JIT's var_types var_types getJitGCType(BYTE gcType); // Returns true if the provided type should be treated as a primitive type // for the unmanaged calling conventions. bool isNativePrimitiveStructType(CORINFO_CLASS_HANDLE clsHnd); enum structPassingKind { SPK_Unknown, // Invalid value, never returned SPK_PrimitiveType, // The struct is passed/returned using a primitive type. SPK_EnclosingType, // Like SPK_PrimitiveType, but used for return types that // require a primitive type temp that is larger than the struct size. // Currently used for structs of size 3, 5, 6, or 7 bytes. SPK_ByValue, // The struct is passed/returned by value (using the ABI rules) // for ARM64 and UNIX_X64 in multiple registers. (when all of the // parameter registers are used, the stack will be used) // for X86 passed on the stack, for ARM32 passed in registers // or the stack or split between registers and the stack. SPK_ByValueAsHfa, // The struct is passed/returned as an HFA in multiple registers. SPK_ByReference }; // The struct is passed/returned by reference to a copy/buffer. // Get the "primitive" type that is used when we are given a struct of size 'structSize'. // For pointer-sized structs the 'clsHnd' is used to determine if the struct contains a GC ref. // A "primitive" type is one of the scalar types: byte, short, int, long, ref, float, double. // If we can't or shouldn't use a "primitive" type then TYP_UNKNOWN is returned. // // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getPrimitiveTypeForStruct(unsigned structSize, CORINFO_CLASS_HANDLE clsHnd, bool isVarArg); // Get the type that is used to pass values of the given struct type. // isVarArg is passed for use on Windows Arm64 to change the decision returned regarding // hfa types. // var_types getArgTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, structPassingKind* wbPassStruct, bool isVarArg, unsigned structSize); // Get the type that is used to return values of the given struct type. // If the size is unknown, pass 0 and it will be determined from 'clsHnd'. var_types getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd, CorInfoCallConvExtension callConv, structPassingKind* wbPassStruct = nullptr, unsigned structSize = 0); #ifdef DEBUG // Print a representation of "vnp" or "vn" on standard output. // If "level" is non-zero, we also print out a partial expansion of the value. void vnpPrint(ValueNumPair vnp, unsigned level); void vnPrint(ValueNum vn, unsigned level); #endif bool fgDominate(BasicBlock* b1, BasicBlock* b2); // Return true if b1 dominates b2 // Dominator computation member functions // Not exposed outside Compiler protected: bool fgReachable(BasicBlock* b1, BasicBlock* b2); // Returns true if block b1 can reach block b2 // Compute immediate dominators, the dominator tree, and its pre/post-order traversal numbers.
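// Worked example (values assumed for illustration): if the DFS assigns preOrder(A) = 2, postOrder(A) = 9 and preOrder(B) = 4, postOrder(B) = 5, then A dominates B, since 2 <= 4 and 9 >= 5; see fgBuildDomTree and fgNumberDomTree below.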
void fgComputeDoms(); void fgCompDominatedByExceptionalEntryBlocks(); BlockSet_ValRet_T fgGetDominatorSet(BasicBlock* block); // Returns a set of blocks that dominate the given block. // Note: this is relatively slow compared to calling fgDominate(), // especially when performing a single block-versus-block check. void fgComputeReachabilitySets(); // Compute bbReach sets. (Also sets BBF_GC_SAFE_POINT flag on blocks.) void fgComputeReturnBlocks(); // Initialize fgReturnBlocks to a list of BBJ_RETURN blocks. void fgComputeEnterBlocksSet(); // Compute the set of entry blocks, 'fgEnterBlks'. bool fgRemoveUnreachableBlocks(); // Remove blocks determined to be unreachable by the bbReach sets. void fgComputeReachability(); // Perform flow graph node reachability analysis. BasicBlock* fgIntersectDom(BasicBlock* a, BasicBlock* b); // Intersect two immediate dominator sets. void fgDfsInvPostOrder(); // In order to compute dominance using fgIntersectDom, the flow graph nodes must be // processed in topological order; this function takes care of that. void fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count); BlockSet_ValRet_T fgDomFindStartNodes(); // Computes which basic blocks don't have incoming edges in the flow graph. // Returns this as a set. INDEBUG(void fgDispDomTree(DomTreeNode* domTree);) // Helper that prints out the Dominator Tree in debug builds. DomTreeNode* fgBuildDomTree(); // Once we compute all the immediate dominator sets for each node in the flow graph // (performed by fgComputeDoms), this procedure builds the dominance tree, represented // with adjacency lists. // In order to speed up queries of the form 'Does A dominate B', we can perform a DFS preorder and postorder // traversal of the dominance tree; the dominance query then becomes: A dominates B iff preOrder(A) <= preOrder(B) // && postOrder(A) >= postOrder(B), making the computation O(1). void fgNumberDomTree(DomTreeNode* domTree); // When the flow graph changes, we need to update the block numbers, predecessor lists, reachability sets, // dominators, and possibly loops. void fgUpdateChangedFlowGraph(const bool computePreds = true, const bool computeDoms = true, const bool computeReturnBlocks = false, const bool computeLoops = false); public: // Compute the predecessors of the blocks in the control flow graph. void fgComputePreds(); // Remove all predecessor information. void fgRemovePreds(); // Compute the cheap flow graph predecessor lists. These are used in some early phases // before the full predecessor lists are computed. void fgComputeCheapPreds(); private: void fgAddCheapPred(BasicBlock* block, BasicBlock* blockPred); void fgRemoveCheapPred(BasicBlock* block, BasicBlock* blockPred); public: enum GCPollType { GCPOLL_NONE, GCPOLL_CALL, GCPOLL_INLINE }; // Initialize the per-block variable sets (used for liveness analysis). void fgInitBlockVarSets(); PhaseStatus fgInsertGCPolls(); BasicBlock* fgCreateGCPoll(GCPollType pollType, BasicBlock* block); // Requires that "block" is a block that returns from // a finally. Returns the number of successors (jump targets of // blocks in the covered "try" that did a "LEAVE".) unsigned fgNSuccsOfFinallyRet(BasicBlock* block); // Requires that "block" is a block that returns (in the sense of BBJ_EHFINALLYRET) from // a finally. Returns its "i"th successor (jump targets of // blocks in the covered "try" that did a "LEAVE".) // Requires that "i" < fgNSuccsOfFinallyRet(block).
BasicBlock* fgSuccOfFinallyRet(BasicBlock* block, unsigned i); private: // Factor out common portions of the impls of the methods above. void fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock** bres, unsigned* nres); public: // For many purposes, it is desirable to be able to enumerate the *distinct* targets of a switch statement, // skipping duplicate targets. (E.g., in flow analyses that are only interested in the set of possible targets.) // SwitchUniqueSuccSet contains the non-duplicated switch targets. // (Code that modifies the jump table of a switch has an obligation to call Compiler::UpdateSwitchTableTarget, // which in turn will call the "UpdateTarget" method of this type if a SwitchUniqueSuccSet has already // been computed for the switch block. If a switch block is deleted or is transformed into a non-switch, // we leave the entry associated with the block, but it will no longer be accessed.) struct SwitchUniqueSuccSet { unsigned numDistinctSuccs; // Number of distinct targets of the switch. BasicBlock** nonDuplicates; // Array of "numDistinctSuccs", containing all the distinct switch target // successors. // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation. void UpdateTarget(CompAllocator alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); }; typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap; private: // Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow // iteration over only the distinct successors. BlockToSwitchDescMap* m_switchDescMap; public: BlockToSwitchDescMap* GetSwitchDescMap(bool createIfNull = true) { if ((m_switchDescMap == nullptr) && createIfNull) { m_switchDescMap = new (getAllocator()) BlockToSwitchDescMap(getAllocator()); } return m_switchDescMap; } // Invalidate the map of unique switch block successors. For example, since the hash key of the map // depends on block numbers, we must invalidate the map when the blocks are renumbered, to ensure that // we don't accidentally look up and return the wrong switch data. void InvalidateUniqueSwitchSuccMap() { m_switchDescMap = nullptr; } // Requires "switchBlock" to be a block that ends in a switch. Returns // the corresponding SwitchUniqueSuccSet. SwitchUniqueSuccSet GetDescriptorForSwitch(BasicBlock* switchBlk); // The switch block "switchBlk" just had an entry with value "from" modified to the value "to". // Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk", // remove it from "this", and ensure that "to" is a member. void UpdateSwitchTableTarget(BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to); // Remove the "SwitchUniqueSuccSet" of "switchBlk" in the BlockToSwitchDescMap. 
void fgInvalidateSwitchDescMapEntry(BasicBlock* switchBlk); BasicBlock* fgFirstBlockOfHandler(BasicBlock* block); bool fgIsFirstBlockOfFilterOrHandler(BasicBlock* block); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred); flowList* fgGetPredForBlock(BasicBlock* block, BasicBlock* blockPred, flowList*** ptrToPred); flowList* fgRemoveRefPred(BasicBlock* block, BasicBlock* blockPred); flowList* fgRemoveAllRefPreds(BasicBlock* block, BasicBlock* blockPred); void fgRemoveBlockAsPred(BasicBlock* block); void fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock); void fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget); void fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred); flowList* fgAddRefPred(BasicBlock* block, BasicBlock* blockPred, flowList* oldEdge = nullptr, bool initializingPreds = false); // Only set to 'true' when we are computing preds in // fgComputePreds() void fgFindBasicBlocks(); bool fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt); bool fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion); BasicBlock* fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely); unsigned fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting = nullptr); void fgPostImportationCleanup(); void fgRemoveStmt(BasicBlock* block, Statement* stmt DEBUGARG(bool isUnlink = false)); void fgUnlinkStmt(BasicBlock* block, Statement* stmt); bool fgCheckRemoveStmt(BasicBlock* block, Statement* stmt); void fgCreateLoopPreHeader(unsigned lnum); void fgUnreachableBlock(BasicBlock* block); void fgRemoveConditionalJump(BasicBlock* block); BasicBlock* fgLastBBInMainFunction(); BasicBlock* fgEndBBAfterMainFunction(); void fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd); void fgRemoveBlock(BasicBlock* block, bool unreachable); bool fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgCompactBlocks(BasicBlock* block, BasicBlock* bNext); void fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext); BasicBlock* fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst); bool fgRenumberBlocks(); bool fgExpandRarelyRunBlocks(); bool fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter); void fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk); enum FG_RELOCATE_TYPE { FG_RELOCATE_TRY, // relocate the 'try' region FG_RELOCATE_HANDLER // relocate the handler region (including the filter if necessary) }; BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType); #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) void fgClearFinallyTargetBit(BasicBlock* block); #endif // defined(TARGET_ARM) bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block); bool fgAnyIntraHandlerPreds(BasicBlock* block); void fgInsertFuncletPrologBlock(BasicBlock* block); void fgCreateFuncletPrologBlocks(); void fgCreateFunclets(); #else // !FEATURE_EH_FUNCLETS bool fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS bool fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target); bool fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum); bool fgBlockIsGoodTailDuplicationCandidate(BasicBlock* block, unsigned* lclNum); bool fgOptimizeEmptyBlock(BasicBlock* block); bool 
fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest); bool fgOptimizeBranch(BasicBlock* bJump); bool fgOptimizeSwitchBranches(BasicBlock* block); bool fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev); bool fgOptimizeSwitchJumps(); #ifdef DEBUG void fgPrintEdgeWeights(); #endif void fgComputeBlockAndEdgeWeights(); weight_t fgComputeMissingBlockWeights(); void fgComputeCalledCount(weight_t returnWeight); void fgComputeEdgeWeights(); bool fgReorderBlocks(); PhaseStatus fgDetermineFirstColdBlock(); bool fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc = nullptr); bool fgUpdateFlowGraph(bool doTailDup = false); void fgFindOperOrder(); // method that returns if you should split here typedef bool(fgSplitPredicate)(GenTree* tree, GenTree* parent, fgWalkData* data); void fgSetBlockOrder(); void fgRemoveReturnBlock(BasicBlock* block); /* Helper code that has been factored out */ inline void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); GenTree* fgDoNormalizeOnStore(GenTree* tree); GenTree* fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry); // The following check for loops that don't execute calls bool fgLoopCallMarked; void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); void fgMarkLoopHead(BasicBlock* block); unsigned fgGetCodeEstimate(BasicBlock* block); #if DUMP_FLOWGRAPHS enum class PhasePosition { PrePhase, PostPhase }; const char* fgProcessEscapes(const char* nameIn, escapeMapping_t* map); static void fgDumpTree(FILE* fgxFile, GenTree* const tree); FILE* fgOpenFlowGraphFile(bool* wbDontClose, Phases phase, PhasePosition pos, LPCWSTR type); bool fgDumpFlowGraph(Phases phase, PhasePosition pos); #endif // DUMP_FLOWGRAPHS #ifdef DEBUG void fgDispDoms(); void fgDispReach(); void fgDispBBLiveness(BasicBlock* block); void fgDispBBLiveness(); void fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth = 0); void fgDispBasicBlocks(BasicBlock* firstBlock, BasicBlock* lastBlock, bool dumpTrees); void fgDispBasicBlocks(bool dumpTrees = false); void fgDumpStmtTree(Statement* stmt, unsigned bbNum); void fgDumpBlock(BasicBlock* block); void fgDumpTrees(BasicBlock* firstBlock, BasicBlock* lastBlock); static fgWalkPreFn fgStress64RsltMulCB; void fgStress64RsltMul(); void fgDebugCheckUpdate(); void fgDebugCheckBBNumIncreasing(); void fgDebugCheckBBlist(bool checkBBNum = false, bool checkBBRefs = true); void fgDebugCheckBlockLinks(); void fgDebugCheckLinks(bool morphTrees = false); void fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees); void fgDebugCheckNodeLinks(BasicBlock* block, Statement* stmt); void fgDebugCheckNodesUniqueness(); void fgDebugCheckLoopTable(); void fgDebugCheckFlags(GenTree* tree); void fgDebugCheckDispFlags(GenTree* tree, GenTreeFlags dispFlags, GenTreeDebugFlags debugFlags); void fgDebugCheckFlagsHelper(GenTree* tree, GenTreeFlags actualFlags, GenTreeFlags expectedFlags); void fgDebugCheckTryFinallyExits(); void fgDebugCheckProfileData(); bool fgDebugCheckIncomingProfileData(BasicBlock* block); bool fgDebugCheckOutgoingProfileData(BasicBlock* block); #endif // DEBUG static bool fgProfileWeightsEqual(weight_t weight1, weight_t weight2); static bool fgProfileWeightsConsistent(weight_t weight1, weight_t weight2); static GenTree* fgGetFirstNode(GenTree* tree); //--------------------- Walking the trees in the IR ----------------------- struct fgWalkData { Compiler* compiler; fgWalkPreFn* wtprVisitorFn; fgWalkPostFn* wtpoVisitorFn; void* pCallbackData; // 
user-provided data GenTree* parent; // parent of current node, provided to callback GenTreeStack* parentStack; // stack of parent nodes, if asked for bool wtprLclsOnly; // whether to only visit lclvar nodes #ifdef DEBUG bool printModified; // callback can use this #endif }; fgWalkResult fgWalkTreePre(GenTree** pTree, fgWalkPreFn* visitor, void* pCallBackData = nullptr, bool lclVarsOnly = false, bool computeStack = false); fgWalkResult fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPostFn* postVisitor, void* pCallBackData = nullptr); void fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData); //----- Postorder fgWalkResult fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* pCallBackData = nullptr, bool computeStack = false); // An fgWalkPreFn that looks for expressions that have inline throws in // minopts mode. Basically it looks for trees with gtOverflowEx() or // GTF_IND_RNGCHK. It returns WALK_ABORT if one is found. It // returns WALK_SKIP_SUBTREES if GTF_EXCEPT is not set (assumes flags are // properly propagated to parent trees). It returns WALK_CONTINUE // otherwise. static fgWalkResult fgChkThrowCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkLocAllocCB(GenTree** pTree, Compiler::fgWalkData* data); static fgWalkResult fgChkQmarkCB(GenTree** pTree, Compiler::fgWalkData* data); /************************************************************************** * PROTECTED *************************************************************************/ protected: friend class SsaBuilder; friend struct ValueNumberState; //--------------------- Detect the basic blocks --------------------------- BasicBlock** fgBBs; // Table of pointers to the BBs void fgInitBBLookup(); BasicBlock* fgLookupBB(unsigned addr); bool fgCanSwitchToOptimized(); void fgSwitchToOptimized(const char* reason); bool fgMayExplicitTailCall(); void fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgMarkBackwardJump(BasicBlock* startBlock, BasicBlock* endBlock); void fgLinkBasicBlocks(); unsigned fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget); void fgCheckBasicBlockControlFlow(); void fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool IsLeave = false /* is the src a leave block */); bool fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling); void fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining); void fgAdjustForAddressExposedOrWrittenThis(); unsigned fgStressBBProf() { #ifdef DEBUG unsigned result = JitConfig.JitStressBBProf(); if (result == 0) { if (compStressCompile(STRESS_BB_PROFILE, 15)) { result = 1; } } return result; #else return 0; #endif } bool fgHaveProfileData(); bool fgGetProfileWeightForBasicBlock(IL_OFFSET offset, weight_t* weight); Instrumentor* fgCountInstrumentor; Instrumentor* fgClassInstrumentor; PhaseStatus fgPrepareToInstrumentMethod(); PhaseStatus fgInstrumentMethod(); PhaseStatus fgIncorporateProfileData(); void fgIncorporateBlockCounts(); void fgIncorporateEdgeCounts(); CORINFO_CLASS_HANDLE getRandomClass(ICorJitInfo::PgoInstrumentationSchema* schema, UINT32 countSchemaItems, BYTE* pInstrumentationData, int32_t ilOffset, CLRRandom* random); public: const char* fgPgoFailReason; bool fgPgoDisabled; ICorJitInfo::PgoSource fgPgoSource; ICorJitInfo::PgoInstrumentationSchema* fgPgoSchema; BYTE* fgPgoData; UINT32 fgPgoSchemaCount; HRESULT fgPgoQueryResult; UINT32 fgNumProfileRuns; UINT32 fgPgoBlockCounts;
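// Illustrative note (not normative): these counters summarize what the PGO schema supplied; e.g. a method instrumented only with edge probes would typically have fgPgoBlockCounts == 0 and a nonzero fgPgoEdgeCounts.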
UINT32 fgPgoEdgeCounts; UINT32 fgPgoClassProfiles; unsigned fgPgoInlineePgo; unsigned fgPgoInlineeNoPgo; unsigned fgPgoInlineeNoPgoSingleBlock; void WalkSpanningTree(SpanningTreeVisitor* visitor); void fgSetProfileWeight(BasicBlock* block, weight_t weight); void fgApplyProfileScale(); bool fgHaveSufficientProfileData(); bool fgHaveTrustedProfileData(); // fgIsUsingProfileWeights - returns true if we have real profile data for this method // or if we have some fake profile data for the stress mode bool fgIsUsingProfileWeights() { return (fgHaveProfileData() || fgStressBBProf()); } // fgProfileRunsCount - returns the total number of scenario runs for the profile data // or BB_UNITY_WEIGHT_UNSIGNED when we aren't using profile data. unsigned fgProfileRunsCount() { return fgIsUsingProfileWeights() ? fgNumProfileRuns : BB_UNITY_WEIGHT_UNSIGNED; } //-------- Insert a statement at the start or end of a basic block -------- #ifdef DEBUG public: static bool fgBlockContainsStatementBounded(BasicBlock* block, Statement* stmt, bool answerOnBoundExceeded = true); #endif public: Statement* fgNewStmtAtBeg(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); void fgInsertStmtAtEnd(BasicBlock* block, Statement* stmt); Statement* fgNewStmtAtEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); Statement* fgNewStmtNearEnd(BasicBlock* block, GenTree* tree, const DebugInfo& di = DebugInfo()); private: void fgInsertStmtNearEnd(BasicBlock* block, Statement* stmt); void fgInsertStmtAtBeg(BasicBlock* block, Statement* stmt); void fgInsertStmtAfter(BasicBlock* block, Statement* insertionPoint, Statement* stmt); public: void fgInsertStmtBefore(BasicBlock* block, Statement* insertionPoint, Statement* stmt); private: Statement* fgInsertStmtListAfter(BasicBlock* block, Statement* stmtAfter, Statement* stmtList); // Create a new temporary variable to hold the result of *ppTree, // and transform the graph accordingly. GenTree* fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType = nullptr); GenTree* fgMakeMultiUse(GenTree** ppTree); private: // Recognize a bitwise rotation pattern and convert into a GT_ROL or a GT_ROR node. GenTree* fgRecognizeAndMorphBitwiseRotation(GenTree* tree); bool fgOperIsBitwiseRotationRoot(genTreeOps oper); #if !defined(TARGET_64BIT) // Recognize and morph a long multiplication with 32 bit operands. GenTreeOp* fgRecognizeAndMorphLongMul(GenTreeOp* mul); GenTreeOp* fgMorphLongMul(GenTreeOp* mul); #endif //-------- Determine the order in which the trees will be evaluated ------- unsigned fgTreeSeqNum; GenTree* fgTreeSeqLst; GenTree* fgTreeSeqBeg; GenTree* fgSetTreeSeq(GenTree* tree, GenTree* prev = nullptr, bool isLIR = false); void fgSetTreeSeqHelper(GenTree* tree, bool isLIR); void fgSetTreeSeqFinish(GenTree* tree, bool isLIR); void fgSetStmtSeq(Statement* stmt); void fgSetBlockOrder(BasicBlock* block); //------------------------- Morphing -------------------------------------- unsigned fgPtrArgCntMax; public: //------------------------------------------------------------------------ // fgGetPtrArgCntMax: Return the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This value is calculated during morph. // // Return Value: // Returns fgPtrArgCntMax, which is a private field.
// unsigned fgGetPtrArgCntMax() const { return fgPtrArgCntMax; } //------------------------------------------------------------------------ // fgSetPtrArgCntMax: Set the maximum number of pointer-sized stack arguments that calls inside this method // can push on the stack. This function is used during StackLevelSetter to fix incorrect morph calculations. // void fgSetPtrArgCntMax(unsigned argCntMax) { fgPtrArgCntMax = argCntMax; } bool compCanEncodePtrArgCntMax(); private: hashBv* fgOutgoingArgTemps; hashBv* fgCurrentlyInUseArgTemps; void fgSetRngChkTarget(GenTree* tree, bool delay = true); BasicBlock* fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay); #if REARRANGE_ADDS void fgMoveOpsLeft(GenTree* tree); #endif bool fgIsCommaThrow(GenTree* tree, bool forFolding = false); bool fgIsThrow(GenTree* tree); bool fgInDifferentRegions(BasicBlock* blk1, BasicBlock* blk2); bool fgIsBlockCold(BasicBlock* block); GenTree* fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper); GenTree* fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs = true); GenTree* fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs); // A "MorphAddrContext" carries information from the surrounding context. If we are evaluating a byref address, // it is useful to know whether the address will be immediately dereferenced, or whether the address value will // be used, perhaps by passing it as an argument to a called method. This affects how null checking is done: // for sufficiently small offsets, we can rely on OS page protection to implicitly null-check addresses that we // know will be dereferenced. To know that reliance on implicit null checking is sound, we must further know that // all offsets between the top-level indirection and the bottom are constant, and that their sum is sufficiently // small; hence the other fields of MorphAddrContext. enum MorphAddrContextKind { MACK_Ind, MACK_Addr, }; struct MorphAddrContext { MorphAddrContextKind m_kind; bool m_allConstantOffsets; // Valid only for "m_kind == MACK_Ind". True iff all offsets between // top-level indirection and here have been constants. size_t m_totalOffset; // Valid only for "m_kind == MACK_Ind", and if "m_allConstantOffsets" is true. // In that case, it is the sum of those constant offsets. MorphAddrContext(MorphAddrContextKind kind) : m_kind(kind), m_allConstantOffsets(true), m_totalOffset(0) { } }; // A MACK_CopyBlock context is immutable, so we can just make one of these and share it. static MorphAddrContext s_CopyBlockMAC; #ifdef FEATURE_SIMD GenTree* getSIMDStructFromField(GenTree* tree, CorInfoType* simdBaseJitTypeOut, unsigned* indexOut, unsigned* simdSizeOut, bool ignoreUsedInSIMDIntrinsic = false); GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree); GenTree* fgMorphFieldToSimdGetElement(GenTree* tree); bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt); void impMarkContiguousSIMDFieldAssignments(Statement* stmt); // fgPreviousCandidateSIMDFieldAsgStmt is only used for tracking the previous simd field assignment // in function: Compiler::impMarkContiguousSIMDFieldAssignments.
Statement* fgPreviousCandidateSIMDFieldAsgStmt; #endif // FEATURE_SIMD GenTree* fgMorphArrayIndex(GenTree* tree); GenTree* fgMorphExpandCast(GenTreeCast* tree); GenTreeFieldList* fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl); void fgInitArgInfo(GenTreeCall* call); GenTreeCall* fgMorphArgs(GenTreeCall* call); void fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass); GenTree* fgMorphLocalVar(GenTree* tree, bool forceRemorph); public: bool fgAddrCouldBeNull(GenTree* addr); private: GenTree* fgMorphField(GenTree* tree, MorphAddrContext* mac); bool fgCanFastTailCall(GenTreeCall* call, const char** failReason); #if FEATURE_FASTTAILCALL bool fgCallHasMustCopyByrefParameter(GenTreeCall* callee); #endif bool fgCheckStmtAfterTailCall(); GenTree* fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help); bool fgCanTailCallViaJitHelper(); void fgMorphTailCallViaJitHelper(GenTreeCall* call); GenTree* fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd); GenTree* getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle); GenTree* getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle); GenTree* getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo); GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent); GenTree* fgMorphPotentialTailCall(GenTreeCall* call); GenTree* fgGetStubAddrArg(GenTreeCall* call); unsigned fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry); void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall); Statement* fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint); GenTree* fgMorphCall(GenTreeCall* call); GenTree* fgExpandVirtualVtableCallTarget(GenTreeCall* call); void fgMorphCallInline(GenTreeCall* call, InlineResult* result); void fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); #if DEBUG void fgNoteNonInlineCandidate(Statement* stmt, GenTreeCall* call); static fgWalkPreFn fgFindNonInlineCandidate; #endif GenTree* fgOptimizeDelegateConstructor(GenTreeCall* call, CORINFO_CONTEXT_HANDLE* ExactContextHnd, CORINFO_RESOLVED_TOKEN* ldftnToken); GenTree* fgMorphLeaf(GenTree* tree); void fgAssignSetVarDef(GenTree* tree); GenTree* fgMorphOneAsgBlockOp(GenTree* tree); GenTree* fgMorphInitBlock(GenTree* tree); GenTree* fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize); GenTree* fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue = false); GenTree* fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd); GenTree* fgMorphCopyBlock(GenTree* tree); GenTree* fgMorphStoreDynBlock(GenTreeStoreDynBlk* tree); GenTree* fgMorphForRegisterFP(GenTree* tree); GenTree* fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac = nullptr); GenTree* fgOptimizeCast(GenTreeCast* cast); GenTree* fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp); GenTree* fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp); #ifdef FEATURE_HW_INTRINSICS GenTree* 
fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node); #endif GenTree* fgOptimizeCommutativeArithmetic(GenTreeOp* tree); GenTree* fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp); GenTree* fgOptimizeAddition(GenTreeOp* add); GenTree* fgOptimizeMultiply(GenTreeOp* mul); GenTree* fgOptimizeBitwiseAnd(GenTreeOp* andOp); GenTree* fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects); GenTree* fgMorphRetInd(GenTreeUnOp* tree); GenTree* fgMorphModToSubMulDiv(GenTreeOp* tree); GenTree* fgMorphSmpOpOptional(GenTreeOp* tree); GenTree* fgMorphMultiOp(GenTreeMultiOp* multiOp); GenTree* fgMorphConst(GenTree* tree); bool fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2); GenTreeLclVar* fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes = true); GenTreeOp* fgMorphCommutative(GenTreeOp* tree); GenTree* fgMorphCastedBitwiseOp(GenTreeOp* tree); GenTree* fgMorphReduceAddOps(GenTree* tree); public: GenTree* fgMorphTree(GenTree* tree, MorphAddrContext* mac = nullptr); private: void fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)); void fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)); void fgMorphTreeDone(GenTree* tree, GenTree* oldTree = nullptr DEBUGARG(int morphNum = 0)); Statement* fgMorphStmt; unsigned fgGetBigOffsetMorphingTemp(var_types type); // We cache one temp per type to be // used when morphing big offsets. //----------------------- Liveness analysis ------------------------------- VARSET_TP fgCurUseSet; // vars used by block (before an assignment) VARSET_TP fgCurDefSet; // vars assigned by block (before a use) MemoryKindSet fgCurMemoryUse; // True iff the current basic block uses memory. MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory. MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value. bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points. void fgMarkUseDef(GenTreeLclVarCommon* tree); void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgEndScopeLife(VARSET_TP* inScope, VarScopeDsc* var); void fgMarkInScope(BasicBlock* block, VARSET_VALARG_TP inScope); void fgUnmarkInScope(BasicBlock* block, VARSET_VALARG_TP unmarkScope); void fgExtendDbgScopes(); void fgExtendDbgLifetimes(); #ifdef DEBUG void fgDispDebugScopes(); #endif // DEBUG //------------------------------------------------------------------------- // // The following keeps track of any code we've added for things like array // range checking or explicit calls to enable GC, and so on. // public: struct AddCodeDsc { AddCodeDsc* acdNext; BasicBlock* acdDstBlk; // block to which we jump unsigned acdData; SpecialCodeKind acdKind; // what kind of a special block is this? #if !FEATURE_FIXED_OUT_ARGS bool acdStkLvlInit; // has the acdStkLvl value already been set? unsigned acdStkLvl; // stack level in stack slots.
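// Illustrative reading (inferred, not stated in the source): without FEATURE_FIXED_OUT_ARGS, a throw helper block of a given kind is only shareable among call sites that agree on the stack depth, so acdStkLvl records that depth in slots for code generation.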
#endif // !FEATURE_FIXED_OUT_ARGS }; private: static unsigned acdHelper(SpecialCodeKind codeKind); AddCodeDsc* fgAddCodeList; bool fgAddCodeModf; bool fgRngChkThrowAdded; AddCodeDsc* fgExcptnTargetCache[SCK_COUNT]; BasicBlock* fgRngChkTarget(BasicBlock* block, SpecialCodeKind kind); BasicBlock* fgAddCodeRef(BasicBlock* srcBlk, unsigned refData, SpecialCodeKind kind); public: AddCodeDsc* fgFindExcptnTarget(SpecialCodeKind kind, unsigned refData); bool fgUseThrowHelperBlocks(); AddCodeDsc* fgGetAdditionalCodeDescriptors() { return fgAddCodeList; } private: bool fgIsCodeAdded(); bool fgIsThrowHlpBlk(BasicBlock* block); #if !FEATURE_FIXED_OUT_ARGS unsigned fgThrowHlpBlkStkLevel(BasicBlock* block); #endif // !FEATURE_FIXED_OUT_ARGS unsigned fgBigOffsetMorphingTemps[TYP_COUNT]; unsigned fgCheckInlineDepthAndRecursion(InlineInfo* inlineInfo); void fgInvokeInlineeCompiler(GenTreeCall* call, InlineResult* result, InlineContext** createdContext); void fgInsertInlineeBlocks(InlineInfo* pInlineInfo); Statement* fgInlinePrependStatements(InlineInfo* inlineInfo); void fgInlineAppendStatements(InlineInfo* inlineInfo, BasicBlock* block, Statement* stmt); #if FEATURE_MULTIREG_RET GenTree* fgGetStructAsStructPtr(GenTree* tree); GenTree* fgAssignStructInlineeToVar(GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); void fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO_CLASS_HANDLE retClsHnd); #endif // FEATURE_MULTIREG_RET static fgWalkPreFn fgUpdateInlineReturnExpressionPlaceHolder; static fgWalkPostFn fgLateDevirtualization; #ifdef DEBUG static fgWalkPreFn fgDebugCheckInlineCandidates; void CheckNoTransformableIndirectCallsRemain(); static fgWalkPreFn fgDebugCheckForTransformableIndirectCalls; #endif void fgPromoteStructs(); void fgMorphStructField(GenTree* tree, GenTree* parent); void fgMorphLocalField(GenTree* tree, GenTree* parent); // Reset the refCount for implicit byrefs. void fgResetImplicitByRefRefCount(); // Change implicit byrefs' types from struct to pointer, and for any that were // promoted, create new promoted struct temps. void fgRetypeImplicitByRefArgs(); // Rewrite appearances of implicit byrefs (manifest the implied additional level of indirection). bool fgMorphImplicitByRefArgs(GenTree* tree); GenTree* fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr); // Clear up annotations for any struct promotion temps created for implicit byrefs. void fgMarkDemotedImplicitByRefArgs(); void fgMarkAddressExposedLocals(); void fgMarkAddressExposedLocals(Statement* stmt); PhaseStatus fgForwardSub(); bool fgForwardSubBlock(BasicBlock* block); bool fgForwardSubStatement(Statement* statement); static fgWalkPreFn fgUpdateSideEffectsPre; static fgWalkPostFn fgUpdateSideEffectsPost; // The given local variable, required to be a struct variable, is being assigned via // a "lclField", to make it masquerade as an integral type in the ABI. Make sure that // the variable is not enregistered, and is therefore not promoted independently. 
void fgLclFldAssign(unsigned lclNum); static fgWalkPreFn gtHasLocalsWithAddrOpCB; enum TypeProducerKind { TPK_Unknown = 0, // May not be a RuntimeType TPK_Handle = 1, // RuntimeType via handle TPK_GetType = 2, // RuntimeType via Object.get_Type() TPK_Null = 3, // Tree value is null TPK_Other = 4 // RuntimeType via other means }; TypeProducerKind gtGetTypeProducerKind(GenTree* tree); bool gtIsTypeHandleToRuntimeTypeHelper(GenTreeCall* call); bool gtIsTypeHandleToRuntimeTypeHandleHelper(GenTreeCall* call, CorInfoHelpFunc* pHelper = nullptr); bool gtIsActiveCSE_Candidate(GenTree* tree); bool fgIsBigOffset(size_t offset); bool fgNeedReturnSpillTemp(); /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Optimizer XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: void optInit(); GenTree* optRemoveRangeCheck(GenTreeBoundsChk* check, GenTree* comma, Statement* stmt); GenTree* optRemoveStandaloneRangeCheck(GenTreeBoundsChk* check, Statement* stmt); void optRemoveCommaBasedRangeCheck(GenTree* comma, Statement* stmt); protected: // Do hoisting for all loops. void optHoistLoopCode(); // To represent sets of VN's that have already been hoisted in outer loops. typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNSet; struct LoopHoistContext { private: // The set of variables hoisted in the current loop (or nullptr if there are none). VNSet* m_pHoistedInCurLoop; public: // Value numbers of expressions that have been hoisted in parent loops in the loop nest. VNSet m_hoistedInParentLoops; // Value numbers of expressions that have been hoisted in the current (or most recent) loop in the nest. // Previous decisions on loop-invariance of value numbers in the current loop. VNSet m_curLoopVnInvariantCache; VNSet* GetHoistedInCurLoop(Compiler* comp) { if (m_pHoistedInCurLoop == nullptr) { m_pHoistedInCurLoop = new (comp->getAllocatorLoopHoist()) VNSet(comp->getAllocatorLoopHoist()); } return m_pHoistedInCurLoop; } VNSet* ExtractHoistedInCurLoop() { VNSet* res = m_pHoistedInCurLoop; m_pHoistedInCurLoop = nullptr; return res; } LoopHoistContext(Compiler* comp) : m_pHoistedInCurLoop(nullptr) , m_hoistedInParentLoops(comp->getAllocatorLoopHoist()) , m_curLoopVnInvariantCache(comp->getAllocatorLoopHoist()) { } }; // Do hoisting for loop "lnum" (an index into the optLoopTable), and all loops nested within it. // Tracks the expressions that have been hoisted by containing loops by temporarily recording their // value numbers in "m_hoistedInParentLoops". This set is not modified by the call. void optHoistLoopNest(unsigned lnum, LoopHoistContext* hoistCtxt); // Do hoisting for a particular loop ("lnum" is an index into the optLoopTable.) // Assumes that expressions have been hoisted in containing loops if their value numbers are in // "m_hoistedInParentLoops". // void optHoistThisLoop(unsigned lnum, LoopHoistContext* hoistCtxt); // Hoist all expressions in "blocks" that are invariant in loop "loopNum" (an index into the optLoopTable) // outside of that loop. Exempt expressions whose value number is in "m_hoistedInParentLoops"; add VN's of hoisted // expressions to "hoistInLoop". 
void optHoistLoopBlocks(unsigned loopNum, ArrayStack<BasicBlock*>* blocks, LoopHoistContext* hoistContext);

// Return true if the tree looks profitable to hoist out of loop 'lnum'.
bool optIsProfitableToHoistTree(GenTree* tree, unsigned lnum);

// Performs hoisting of 'tree' into the PreHeader for loop 'lnum'.
void optHoistCandidate(GenTree* tree, BasicBlock* treeBb, unsigned lnum, LoopHoistContext* hoistCtxt);

// Returns true iff the ValueNum "vn" represents a value that is loop-invariant in "lnum".
// Constants and init values are always loop invariant.
// VNPhi's connect VN's to the SSA definition, so we can know if the SSA def occurs in the loop.
bool optVNIsLoopInvariant(ValueNum vn, unsigned lnum, VNSet* recordedVNs);

// If "blk" is the entry block of a natural loop, returns true and sets "*pLnum" to the index of the loop
// in the loop table.
bool optBlockIsLoopEntry(BasicBlock* blk, unsigned* pLnum);

// Records the set of "side effects" of all loops: fields (object instance and static)
// written to, and SZ-array element type equivalence classes updated.
void optComputeLoopSideEffects();

#ifdef DEBUG
bool optAnyChildNotRemoved(unsigned loopNum);
#endif // DEBUG

// Mark a loop as removed.
void optMarkLoopRemoved(unsigned loopNum);

private:
// Requires "lnum" to be the index of an outermost loop in the loop table. Traverses the body of that loop,
// including all nested loops, and records the set of "side effects" of the loop: fields (object instance and
// static) written to, and SZ-array element type equivalence classes updated.
void optComputeLoopNestSideEffects(unsigned lnum);

// Given a loop number 'lnum' mark it and any nested loops as having 'memoryHavoc'
void optRecordLoopNestsMemoryHavoc(unsigned lnum, MemoryKindSet memoryHavoc);

// Add the side effects of "blk" (which is required to be within a loop) to all loops of which it is a part.
// Returns false if we encounter a block that is not marked as being inside a loop.
//
bool optComputeLoopSideEffectsOfBlock(BasicBlock* blk);

// Hoist the expression "expr" out of loop "lnum".
void optPerformHoistExpr(GenTree* expr, BasicBlock* exprBb, unsigned lnum);

public:
void optOptimizeBools();

public:
PhaseStatus optInvertLoops();    // Invert loops so they're entered at top and tested at bottom.
PhaseStatus optOptimizeLayout(); // Optimize the BasicBlock layout of the method
PhaseStatus optSetBlockWeights();
PhaseStatus optFindLoopsPhase(); // Finds loops and records them in the loop table

void optFindLoops();

PhaseStatus optCloneLoops();
void optCloneLoop(unsigned loopInd, LoopCloneContext* context);
void optEnsureUniqueHead(unsigned loopInd, weight_t ambientWeight);
PhaseStatus optUnrollLoops(); // Unrolls loops (needs to have cost info)
void optRemoveRedundantZeroInits();

protected:
// This enumeration describes what is killed by a call.

enum callInterf
{
    CALLINT_NONE,       // no interference                               (most helpers)
    CALLINT_REF_INDIRS, // kills GC ref indirections                     (SETFIELD OBJ)
    CALLINT_SCL_INDIRS, // kills non GC ref indirections                 (SETFIELD non-OBJ)
    CALLINT_ALL_INDIRS, // kills both GC ref and non GC ref indirections (SETFIELD STRUCT)
    CALLINT_ALL,        // kills everything                              (normal method call)
};

enum class FieldKindForVN
{
    SimpleStatic,
    WithBaseAddr
};

public:
// A "LoopDsc" describes a ("natural") loop. We (currently) require the body of a loop to be a contiguous (in
// bbNext order) sequence of basic blocks. (At times, we may require the blocks in a loop to be "properly numbered"
// in bbNext order; we use comparisons on the bbNum to decide order.)
// The blocks that define the body are // top <= entry <= bottom // The "head" of the loop is a block outside the loop that has "entry" as a successor. We only support loops with a // single 'head' block. The meanings of these blocks are given in the definitions below. Also see the picture at // Compiler::optFindNaturalLoops(). struct LoopDsc { BasicBlock* lpHead; // HEAD of the loop (not part of the looping of the loop) -- has ENTRY as a successor. BasicBlock* lpTop; // loop TOP (the back edge from lpBottom reaches here). Lexically first block (in bbNext // order) reachable in this loop. BasicBlock* lpEntry; // the ENTRY in the loop (in most cases TOP or BOTTOM) BasicBlock* lpBottom; // loop BOTTOM (from here we have a back edge to the TOP) BasicBlock* lpExit; // if a single exit loop this is the EXIT (in most cases BOTTOM) callInterf lpAsgCall; // "callInterf" for calls in the loop ALLVARSET_TP lpAsgVars; // set of vars assigned within the loop (all vars, not just tracked) varRefKinds lpAsgInds : 8; // set of inds modified within the loop LoopFlags lpFlags; unsigned char lpExitCnt; // number of exits from the loop unsigned char lpParent; // The index of the most-nested loop that completely contains this one, // or else BasicBlock::NOT_IN_LOOP if no such loop exists. unsigned char lpChild; // The index of a nested loop, or else BasicBlock::NOT_IN_LOOP if no child exists. // (Actually, an "immediately" nested loop -- // no other child of this loop is a parent of lpChild.) unsigned char lpSibling; // The index of another loop that is an immediate child of lpParent, // or else BasicBlock::NOT_IN_LOOP. One can enumerate all the children of a loop // by following "lpChild" then "lpSibling" links. bool lpLoopHasMemoryHavoc[MemoryKindCount]; // The loop contains an operation that we assume has arbitrary // memory side effects. If this is set, the fields below // may not be accurate (since they become irrelevant.) VARSET_TP lpVarInOut; // The set of variables that are IN or OUT during the execution of this loop VARSET_TP lpVarUseDef; // The set of variables that are USE or DEF during the execution of this loop // The following counts are used for hoisting profitability checks. int lpHoistedExprCount; // The register count for the non-FP expressions from inside this loop that have been // hoisted int lpLoopVarCount; // The register count for the non-FP LclVars that are read/written inside this loop int lpVarInOutCount; // The register count for the non-FP LclVars that are alive inside or across this loop int lpHoistedFPExprCount; // The register count for the FP expressions from inside this loop that have been // hoisted int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or across this loop typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, FieldKindForVN> FieldHandleSet; FieldHandleSet* lpFieldsModified; // This has entries for all static field and object instance fields modified // in the loop. typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet; ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that // arrays of that type are modified // in the loop. 
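// Editor's illustrative note (hypothetical loop body, not compiler code): for a loop containing
// "obj.fld = 1;" and "dblArr[i] = 0.5;", the side-effect computation records the handle of 'fld'
// in lpFieldsModified and the element type 'double' in lpArrayElemTypesModified. Consumers such as
// hoisting and value numbering can then treat reads of unrelated fields and element types as
// unaffected by this loop.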
// Adds the variable liveness information for 'blk' to 'this' LoopDsc void AddVariableLiveness(Compiler* comp, BasicBlock* blk); inline void AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // This doesn't *always* take a class handle -- it can also take primitive types, encoded as class handles // (shifted left, with a low-order bit set to distinguish.) // Use the {Encode/Decode}ElemType methods to construct/destruct these. inline void AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd); /* The following values are set only for iterator loops, i.e. has the flag LPFLG_ITER set */ GenTree* lpIterTree; // The "i = i <op> const" tree unsigned lpIterVar() const; // iterator variable # int lpIterConst() const; // the constant with which the iterator is incremented genTreeOps lpIterOper() const; // the type of the operation on the iterator (ASG_ADD, ASG_SUB, etc.) void VERIFY_lpIterTree() const; var_types lpIterOperType() const; // For overflow instructions // Set to the block where we found the initialization for LPFLG_CONST_INIT or LPFLG_VAR_INIT loops. // Initially, this will be 'head', but 'head' might change if we insert a loop pre-header block. BasicBlock* lpInitBlock; union { int lpConstInit; // initial constant value of iterator // : Valid if LPFLG_CONST_INIT unsigned lpVarInit; // initial local var number to which we initialize the iterator // : Valid if LPFLG_VAR_INIT }; // The following is for LPFLG_ITER loops only (i.e. the loop condition is "i RELOP const or var") GenTree* lpTestTree; // pointer to the node containing the loop test genTreeOps lpTestOper() const; // the type of the comparison between the iterator and the limit (GT_LE, GT_GE, // etc.) void VERIFY_lpTestTree() const; bool lpIsReversed() const; // true if the iterator node is the second operand in the loop condition GenTree* lpIterator() const; // the iterator node in the loop test GenTree* lpLimit() const; // the limit node in the loop test // Limit constant value of iterator - loop condition is "i RELOP const" // : Valid if LPFLG_CONST_LIMIT int lpConstLimit() const; // The lclVar # in the loop condition ( "i RELOP lclVar" ) // : Valid if LPFLG_VAR_LIMIT unsigned lpVarLimit() const; // The array length in the loop condition ( "i RELOP arr.len" or "i RELOP arr[i][j].len" ) // : Valid if LPFLG_ARRLEN_LIMIT bool lpArrLenLimit(Compiler* comp, ArrIndex* index) const; // Returns "true" iff this is a "top entry" loop. bool lpIsTopEntry() const { if (lpHead->bbNext == lpEntry) { assert(lpHead->bbFallsThrough()); assert(lpTop == lpEntry); return true; } else { return false; } } // Returns "true" iff "*this" contains the blk. bool lpContains(BasicBlock* blk) const { return lpTop->bbNum <= blk->bbNum && blk->bbNum <= lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains the range [top, bottom] (allowing tops // to be equal, but requiring bottoms to be different.) bool lpContains(BasicBlock* top, BasicBlock* bottom) const { return lpTop->bbNum <= top->bbNum && bottom->bbNum < lpBottom->bbNum; } // Returns "true" iff "*this" (properly) contains "lp2" (allowing tops to be equal, but requiring // bottoms to be different.) bool lpContains(const LoopDsc& lp2) const { return lpContains(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is (properly) contained by the range [top, bottom] // (allowing tops to be equal, but requiring bottoms to be different.) 
bool lpContainedBy(BasicBlock* top, BasicBlock* bottom) const { return top->bbNum <= lpTop->bbNum && lpBottom->bbNum < bottom->bbNum; } // Returns "true" iff "*this" is (properly) contained by "lp2" // (allowing tops to be equal, but requiring bottoms to be different.) bool lpContainedBy(const LoopDsc& lp2) const { return lpContainedBy(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff "*this" is disjoint from the range [top, bottom]. bool lpDisjoint(BasicBlock* top, BasicBlock* bottom) const { return bottom->bbNum < lpTop->bbNum || lpBottom->bbNum < top->bbNum; } // Returns "true" iff "*this" is disjoint from "lp2". bool lpDisjoint(const LoopDsc& lp2) const { return lpDisjoint(lp2.lpTop, lp2.lpBottom); } // Returns "true" iff the loop is well-formed (see code for defn). bool lpWellFormed() const { return lpTop->bbNum <= lpEntry->bbNum && lpEntry->bbNum <= lpBottom->bbNum && (lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum); } #ifdef DEBUG void lpValidatePreHeader() const { // If this is called, we expect there to be a pre-header. assert(lpFlags & LPFLG_HAS_PREHEAD); // The pre-header must unconditionally enter the loop. assert(lpHead->GetUniqueSucc() == lpEntry); // The loop block must be marked as a pre-header. assert(lpHead->bbFlags & BBF_LOOP_PREHEADER); // The loop entry must have a single non-loop predecessor, which is the pre-header. // We can't assume here that the bbNum are properly ordered, so we can't do a simple lpContained() // check. So, we defer this check, which will be done by `fgDebugCheckLoopTable()`. } #endif // DEBUG // LoopBlocks: convenience method for enabling range-based `for` iteration over all the // blocks in a loop, e.g.: // for (BasicBlock* const block : loop->LoopBlocks()) ... // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order // from `lpTop` through `lpBottom`, inclusive. All blocks in this range are considered // to be part of the loop. // BasicBlockRangeList LoopBlocks() const { return BasicBlockRangeList(lpTop, lpBottom); } }; protected: bool fgMightHaveLoop(); // returns true if there are any back edges bool fgHasLoops; // True if this method has any loops, set in fgComputeReachability public: LoopDsc* optLoopTable; // loop descriptor table unsigned char optLoopCount; // number of tracked loops unsigned char loopAlignCandidates; // number of loops identified for alignment // Every time we rebuild the loop table, we increase the global "loop epoch". Any loop indices or // loop table pointers from the previous epoch are invalid. // TODO: validate this in some way? unsigned optCurLoopEpoch; void NewLoopEpoch() { ++optCurLoopEpoch; JITDUMP("New loop epoch %d\n", optCurLoopEpoch); } #ifdef DEBUG unsigned char loopsAligned; // number of loops actually aligned #endif // DEBUG bool optRecordLoop(BasicBlock* head, BasicBlock* top, BasicBlock* entry, BasicBlock* bottom, BasicBlock* exit, unsigned char exitCnt); void optClearLoopIterInfo(); #ifdef DEBUG void optPrintLoopInfo(unsigned lnum, bool printVerbose = false); void optPrintLoopInfo(const LoopDsc* loop, bool printVerbose = false); void optPrintLoopTable(); #endif protected: unsigned optCallCount; // number of calls made in the method unsigned optIndirectCallCount; // number of virtual, interface and indirect calls made in the method unsigned optNativeCallCount; // number of Pinvoke/Native calls made in the method unsigned optLoopsCloned; // number of loops cloned in the current method. 
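// Editor's illustrative sketch (not existing compiler code): per the lpChild/lpSibling
// documentation in LoopDsc above, the immediate children of loop "lnum" can be enumerated as:
//
//     for (unsigned char child = optLoopTable[lnum].lpChild;
//          child != BasicBlock::NOT_IN_LOOP;
//          child = optLoopTable[child].lpSibling)
//     {
//         // visit optLoopTable[child]
//     }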
#ifdef DEBUG void optCheckPreds(); #endif void optResetLoopInfo(); void optFindAndScaleGeneralLoopBlocks(); // Determine if there are any potential loops, and set BBF_LOOP_HEAD on potential loop heads. void optMarkLoopHeads(); void optScaleLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUnmarkLoopBlocks(BasicBlock* begBlk, BasicBlock* endBlk); void optUpdateLoopsBeforeRemoveBlock(BasicBlock* block, bool skipUnmarkLoop = false); bool optIsLoopTestEvalIntoTemp(Statement* testStmt, Statement** newTestStmt); unsigned optIsLoopIncrTree(GenTree* incr); bool optCheckIterInLoopTest(unsigned loopInd, GenTree* test, BasicBlock* from, BasicBlock* to, unsigned iterVar); bool optComputeIterInfo(GenTree* incr, BasicBlock* from, BasicBlock* to, unsigned* pIterVar); bool optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenTree* init, unsigned iterVar); bool optExtractInitTestIncr( BasicBlock* head, BasicBlock* bottom, BasicBlock* exit, GenTree** ppInit, GenTree** ppTest, GenTree** ppIncr); void optFindNaturalLoops(); void optIdentifyLoopsForAlignment(); // Ensures that all the loops in the loop nest rooted at "loopInd" (an index into the loop table) are 'canonical' -- // each loop has a unique "top." Returns "true" iff the flowgraph has been modified. bool optCanonicalizeLoopNest(unsigned char loopInd); // Ensures that the loop "loopInd" (an index into the loop table) is 'canonical' -- it has a unique "top," // unshared with any other loop. Returns "true" iff the flowgraph has been modified bool optCanonicalizeLoop(unsigned char loopInd); // Requires "l1" to be a valid loop table index, and not "BasicBlock::NOT_IN_LOOP". // Requires "l2" to be a valid loop table index, or else "BasicBlock::NOT_IN_LOOP". // Returns true iff "l2" is not NOT_IN_LOOP, and "l1" contains "l2". // A loop contains itself. bool optLoopContains(unsigned l1, unsigned l2) const; // Updates the loop table by changing loop "loopInd", whose head is required // to be "from", to be "to". Also performs this transformation for any // loop nested in "loopInd" that shares the same head as "loopInd". void optUpdateLoopHead(unsigned loopInd, BasicBlock* from, BasicBlock* to); void optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, const bool updatePreds = false); // Marks the containsCall information to "lnum" and any parent loops. void AddContainsCallAllContainingLoops(unsigned lnum); // Adds the variable liveness information from 'blk' to "lnum" and any parent loops. void AddVariableLivenessAllContainingLoops(unsigned lnum, BasicBlock* blk); // Adds "fldHnd" to the set of modified fields of "lnum" and any parent loops. void AddModifiedFieldAllContainingLoops(unsigned lnum, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind); // Adds "elemType" to the set of modified array element types of "lnum" and any parent loops. void AddModifiedElemTypeAllContainingLoops(unsigned lnum, CORINFO_CLASS_HANDLE elemType); // Requires that "from" and "to" have the same "bbJumpKind" (perhaps because "to" is a clone // of "from".) Copies the jump destination from "from" to "to". void optCopyBlkDest(BasicBlock* from, BasicBlock* to); // Returns true if 'block' is an entry block for any loop in 'optLoopTable' bool optIsLoopEntry(BasicBlock* block) const; // The depth of the loop described by "lnum" (an index into the loop table.) 
// (0 == top level)
unsigned optLoopDepth(unsigned lnum)
{
    assert(lnum < optLoopCount);

    unsigned depth = 0;
    while ((lnum = optLoopTable[lnum].lpParent) != BasicBlock::NOT_IN_LOOP)
    {
        ++depth;
    }
    return depth;
}

// Struct used in optInvertWhileLoop to count interesting constructs to boost the profitability score.
struct OptInvertCountTreeInfoType
{
    int sharedStaticHelperCount;
    int arrayLengthCount;
};

static fgWalkResult optInvertCountTreeInfo(GenTree** pTree, fgWalkData* data);

bool optInvertWhileLoop(BasicBlock* block);

private:
static bool optIterSmallOverflow(int iterAtExit, var_types incrType);
static bool optIterSmallUnderflow(int iterAtExit, var_types decrType);

bool optComputeLoopRep(int        constInit,
                       int        constLimit,
                       int        iterInc,
                       genTreeOps iterOper,
                       var_types  iterType,
                       genTreeOps testOper,
                       bool       unsignedTest,
                       bool       dupCond,
                       unsigned*  iterCount);

static fgWalkPreFn optIsVarAssgCB;

protected:
bool optIsVarAssigned(BasicBlock* beg, BasicBlock* end, GenTree* skip, unsigned var);

bool optIsVarAssgLoop(unsigned lnum, unsigned var);

int optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds = VR_NONE);

bool optNarrowTree(GenTree* tree, var_types srct, var_types dstt, ValueNumPair vnpNarrow, bool doit);

protected:
// The following is the upper limit on how many expressions we'll keep track
// of for the CSE analysis.
//
static const unsigned MAX_CSE_CNT = EXPSET_SZ;

static const int MIN_CSE_COST = 2;

// BitVec trait information only used by the optCSE_canSwap() method, for the CSE_defMask and CSE_useMask.
// This BitVec uses one bit per CSE candidate
BitVecTraits* cseMaskTraits; // one bit per CSE candidate

// BitVec trait information for computing CSE availability using the CSE_DataFlow algorithm.
// Two bits are allocated per CSE candidate to compute CSE availability
// plus an extra bit to handle the initial unvisited case.
// (See CSE_DataFlow::EndMerge for an explanation of why this is necessary.)
//
// The two bits per CSE candidate have the following meanings:
//     11 - The CSE is available, and is also available when considering calls as killing availability.
//     10 - The CSE is available, but is not available when considering calls as killing availability.
//     00 - The CSE is not available
//     01 - An illegal combination
//
BitVecTraits* cseLivenessTraits;

//-----------------------------------------------------------------------------------------------------------------
// genCSEnum2bit: Return the normalized index to use in the EXPSET_TP for the CSE with the given CSE index.
// Each GenTree has a `gtCSEnum` field. Zero is reserved to mean this node is not a CSE, positive values indicate
// CSE uses, and negative values indicate CSE defs. The caller must pass a non-zero positive value, as from
// GET_CSE_INDEX().
//
static unsigned genCSEnum2bit(unsigned CSEnum)
{
    assert((CSEnum > 0) && (CSEnum <= MAX_CSE_CNT));
    return CSEnum - 1;
}

//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit for a CSE.
//
static unsigned getCSEAvailBit(unsigned CSEnum)
{
    return genCSEnum2bit(CSEnum) * 2;
}

//-----------------------------------------------------------------------------------------------------------------
// getCSEAvailCrossCallBit: Return the bit used by CSE dataflow sets (bbCseGen, etc.) for the availability bit
// for a CSE considering calls as killing availability bit (see description above).
//
static unsigned getCSEAvailCrossCallBit(unsigned CSEnum)
{
    return getCSEAvailBit(CSEnum) + 1;
}
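// Worked example (editor's note, illustrative only): for CSE candidate #3 -- a positive index as
// produced by GET_CSE_INDEX() -- the helpers above give genCSEnum2bit(3) == 2, getCSEAvailBit(3) == 4
// and getCSEAvailCrossCallBit(3) == 5. So in a CSE dataflow set, bit 4 set with bit 5 clear is the
// '10' state (available, but killed by calls), and bits 4 and 5 both set is the '11' state
// (available even when calls are treated as killing availability).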
void optPrintCSEDataFlowSet(EXPSET_VALARG_TP cseDataFlowSet, bool includeBits = true);

EXPSET_TP cseCallKillsMask; // Computed once - A mask that is used to kill available CSEs at callsites

/* Generic list of nodes - used by the CSE logic */

struct treeLst
{
    treeLst* tlNext;
    GenTree* tlTree;
};

struct treeStmtLst
{
    treeStmtLst* tslNext;
    GenTree*     tslTree;  // tree node
    Statement*   tslStmt;  // statement containing the tree
    BasicBlock*  tslBlock; // block containing the statement
};

// The following logic keeps track of expressions via a simple hash table.

struct CSEdsc
{
    CSEdsc*  csdNextInBucket;  // used by the hash table
    size_t   csdHashKey;       // the original hashkey
    ssize_t  csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def
    ValueNum csdConstDefVN;    // When we CSE similar constants, this is the ValueNumber that we use for the LclVar
                               // assignment
    unsigned csdIndex;         // 1..optCSECandidateCount
    bool     csdIsSharedConst; // true if this CSE is a shared const
    bool     csdLiveAcrossCall;

    unsigned short csdDefCount; // definition count
    unsigned short csdUseCount; // use count (excluding the implicit uses at defs)

    weight_t csdDefWtCnt; // weighted def count
    weight_t csdUseWtCnt; // weighted use count (excluding the implicit uses at defs)

    GenTree*    csdTree;  // treenode containing the 1st occurrence
    Statement*  csdStmt;  // stmt containing the 1st occurrence
    BasicBlock* csdBlock; // block containing the 1st occurrence

    treeStmtLst* csdTreeList; // list of matching tree nodes: head
    treeStmtLst* csdTreeLast; // list of matching tree nodes: tail

    // ToDo: This can be removed when gtGetStructHandleIfPresent stops guessing
    // and GT_IND nodes always have a valid struct handle.
    //
    CORINFO_CLASS_HANDLE csdStructHnd; // The class handle, currently needed to create a SIMD LclVar in PerformCSE
    bool                 csdStructHndMismatch;

    ValueNum defExcSetPromise; // The exception set that is now required for all defs of this CSE.
                               // This will be set to NoVN if we decide to abandon this CSE

    ValueNum defExcSetCurrent; // The set of exceptions we currently can use for CSE uses.

    ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value
                               // number, this will reflect it; otherwise, NoVN.
                               // not used for shared const CSE's
};

static const size_t s_optCSEhashSizeInitial;
static const size_t s_optCSEhashGrowthFactor;
static const size_t s_optCSEhashBucketSize;
size_t              optCSEhashSize;                 // The current size of hashtable
size_t              optCSEhashCount;                // Number of entries in hashtable
size_t              optCSEhashMaxCountBeforeResize; // Number of entries before resize
CSEdsc**            optCSEhash;
CSEdsc**            optCSEtab;

typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, GenTree*> NodeToNodeMap;

NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
                                      // re-numbered with the bound to improve range check elimination
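// Editor's note (illustrative, hypothetical source pattern): for code like
// "if (i < arr.Length) { use(arr[i]); }", the compare feeding the branch consumes the checked bound
// "arr.Length". Recording that compare in optCseCheckedBoundMap lets the CSE phase re-number the
// compare with the CSE'd bound's value number, so range check elimination can still match the
// bounds test against the array access.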
// Given a compare, look for a cse candidate checked bound feeding it and add a map entry if found.
void optCseUpdateCheckedBoundMap(GenTree* compare);

void optCSEstop();

CSEdsc* optCSEfindDsc(unsigned index);
bool optUnmarkCSE(GenTree* tree);

// user-defined callback data for the tree walk function optCSE_MaskHelper()
struct optCSE_MaskData
{
    EXPSET_TP CSE_defMask;
    EXPSET_TP CSE_useMask;
};

// Treewalk helper for optCSE_DefMask and optCSE_UseMask
static fgWalkPreFn optCSE_MaskHelper;

// This function walks all the nodes of a given tree
// and returns the mask of CSE definitions and uses for the tree
//
void optCSE_GetMaskData(GenTree* tree, optCSE_MaskData* pMaskData);

// Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);

struct optCSEcostCmpEx
{
    bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};
struct optCSEcostCmpSz
{
    bool operator()(const CSEdsc* op1, const CSEdsc* op2);
};

void optCleanupCSEs();

#ifdef DEBUG
void optEnsureClearCSEInfo();
#endif // DEBUG

static bool Is_Shared_Const_CSE(size_t key)
{
    return ((key & TARGET_SIGN_BIT) != 0);
}

// returns the encoded key
static size_t Encode_Shared_Const_CSE_Value(size_t key)
{
    return TARGET_SIGN_BIT | (key >> CSE_CONST_SHARED_LOW_BITS);
}

// returns the original key
static size_t Decode_Shared_Const_CSE_Value(size_t enckey)
{
    assert(Is_Shared_Const_CSE(enckey));
    return (enckey & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS;
}

/**************************************************************************
 *                   Value Number based CSEs
 *************************************************************************/

// String to use for formatting CSE numbers. Note that this is the positive number, e.g., from GET_CSE_INDEX().
#define FMT_CSE "CSE #%02u"

public:
void optOptimizeValnumCSEs();

protected:
void     optValnumCSE_Init();
unsigned optValnumCSE_Index(GenTree* tree, Statement* stmt);
bool optValnumCSE_Locate();
void optValnumCSE_InitDataFlow();
void optValnumCSE_DataFlow();
void optValnumCSE_Availablity();
void optValnumCSE_Heuristic();

bool     optDoCSE;             // True when we have found a duplicate CSE tree
bool     optValnumCSE_phase;   // True when we are executing the optOptimizeValnumCSEs() phase
unsigned optCSECandidateCount; // Count of CSE's candidates
unsigned optCSEstart;          // The first local variable number that is a CSE
unsigned optCSEcount;          // The total count of CSE's introduced.
weight_t optCSEweight;         // The weight of the current block when we are doing PerformCSE

bool optIsCSEcandidate(GenTree* tree);

// lclNumIsTrueCSE returns true if the LclVar was introduced by the CSE phase of the compiler
//
bool lclNumIsTrueCSE(unsigned lclNum) const
{
    return ((optCSEcount > 0) && (lclNum >= optCSEstart) && (lclNum < optCSEstart + optCSEcount));
}

// lclNumIsCSE returns true if the LclVar should be treated like a CSE with regards to constant prop.
//
bool lclNumIsCSE(unsigned lclNum) const
{
    return lvaGetDesc(lclNum)->lvIsCSE;
}

#ifdef DEBUG
bool optConfigDisableCSE();
bool optConfigDisableCSE2();
#endif

void optOptimizeCSEs();

struct isVarAssgDsc
{
    GenTree*     ivaSkip;
    ALLVARSET_TP ivaMaskVal; // Set of variables assigned to.  This is a set of all vars, not tracked vars.
#ifdef DEBUG
    void* ivaSelf;
#endif
    unsigned    ivaVar;            // Variable we are interested in, or -1
    varRefKinds ivaMaskInd;        // What kind of indirect assignments are there?
    callInterf  ivaMaskCall;       // What kind of calls are there?
    bool        ivaMaskIncomplete; // Variables not representable in ivaMaskVal were assigned to.
}; static callInterf optCallInterf(GenTreeCall* call); public: // VN based copy propagation. // In DEBUG builds, we'd like to know the tree that the SSA definition was pushed for. // While for ordinary SSA defs it will be available (as an ASG) in the SSA descriptor, // for locals which will use "definitions from uses", it will not be, so we store it // in this class instead. class CopyPropSsaDef { LclSsaVarDsc* m_ssaDef; #ifdef DEBUG GenTree* m_defNode; #endif public: CopyPropSsaDef(LclSsaVarDsc* ssaDef, GenTree* defNode) : m_ssaDef(ssaDef) #ifdef DEBUG , m_defNode(defNode) #endif { } LclSsaVarDsc* GetSsaDef() const { return m_ssaDef; } #ifdef DEBUG GenTree* GetDefNode() const { return m_defNode; } #endif }; typedef ArrayStack<CopyPropSsaDef> CopyPropSsaDefStack; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, CopyPropSsaDefStack*> LclNumToLiveDefsMap; // Copy propagation functions. void optCopyProp(Statement* stmt, GenTreeLclVarCommon* tree, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); void optBlockCopyPropPopStacks(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optBlockCopyProp(BasicBlock* block, LclNumToLiveDefsMap* curSsaName); void optCopyPropPushDef(GenTreeOp* asg, GenTreeLclVarCommon* lclNode, unsigned lclNum, LclNumToLiveDefsMap* curSsaName); unsigned optIsSsaLocal(GenTreeLclVarCommon* lclNode); int optCopyProp_LclVarScore(const LclVarDsc* lclVarDsc, const LclVarDsc* copyVarDsc, bool preferOp2); void optVnCopyProp(); INDEBUG(void optDumpCopyPropStack(LclNumToLiveDefsMap* curSsaName)); /************************************************************************** * Early value propagation *************************************************************************/ struct SSAName { unsigned m_lvNum; unsigned m_ssaNum; SSAName(unsigned lvNum, unsigned ssaNum) : m_lvNum(lvNum), m_ssaNum(ssaNum) { } static unsigned GetHashCode(SSAName ssaNm) { return (ssaNm.m_lvNum << 16) | (ssaNm.m_ssaNum); } static bool Equals(SSAName ssaNm1, SSAName ssaNm2) { return (ssaNm1.m_lvNum == ssaNm2.m_lvNum) && (ssaNm1.m_ssaNum == ssaNm2.m_ssaNum); } }; #define OMF_HAS_NEWARRAY 0x00000001 // Method contains 'new' of an array #define OMF_HAS_NEWOBJ 0x00000002 // Method contains 'new' of an object type. #define OMF_HAS_ARRAYREF 0x00000004 // Method contains array element loads or stores. #define OMF_HAS_NULLCHECK 0x00000008 // Method contains null check. #define OMF_HAS_FATPOINTER 0x00000010 // Method contains call, that needs fat pointer transformation. #define OMF_HAS_OBJSTACKALLOC 0x00000020 // Method contains an object allocated on the stack. #define OMF_HAS_GUARDEDDEVIRT 0x00000040 // Method contains guarded devirtualization candidate #define OMF_HAS_EXPRUNTIMELOOKUP 0x00000080 // Method contains a runtime lookup to an expandable dictionary. #define OMF_HAS_PATCHPOINT 0x00000100 // Method contains patchpoints #define OMF_NEEDS_GCPOLLS 0x00000200 // Method needs GC polls #define OMF_HAS_FROZEN_STRING 0x00000400 // Method has a frozen string (REF constant int), currently only on CoreRT. 
#define OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT 0x00000800 // Method contains partial compilation patchpoints #define OMF_HAS_TAILCALL_SUCCESSOR 0x00001000 // Method has potential tail call in a non BBJ_RETURN block bool doesMethodHaveFatPointer() { return (optMethodFlags & OMF_HAS_FATPOINTER) != 0; } void setMethodHasFatPointer() { optMethodFlags |= OMF_HAS_FATPOINTER; } void clearMethodHasFatPointer() { optMethodFlags &= ~OMF_HAS_FATPOINTER; } void addFatPointerCandidate(GenTreeCall* call); bool doesMethodHaveFrozenString() const { return (optMethodFlags & OMF_HAS_FROZEN_STRING) != 0; } void setMethodHasFrozenString() { optMethodFlags |= OMF_HAS_FROZEN_STRING; } bool doesMethodHaveGuardedDevirtualization() const { return (optMethodFlags & OMF_HAS_GUARDEDDEVIRT) != 0; } void setMethodHasGuardedDevirtualization() { optMethodFlags |= OMF_HAS_GUARDEDDEVIRT; } void clearMethodHasGuardedDevirtualization() { optMethodFlags &= ~OMF_HAS_GUARDEDDEVIRT; } void considerGuardedDevirtualization(GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)); void addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood); bool doesMethodHaveExpRuntimeLookup() { return (optMethodFlags & OMF_HAS_EXPRUNTIMELOOKUP) != 0; } void setMethodHasExpRuntimeLookup() { optMethodFlags |= OMF_HAS_EXPRUNTIMELOOKUP; } void clearMethodHasExpRuntimeLookup() { optMethodFlags &= ~OMF_HAS_EXPRUNTIMELOOKUP; } void addExpRuntimeLookupCandidate(GenTreeCall* call); bool doesMethodHavePatchpoints() { return (optMethodFlags & OMF_HAS_PATCHPOINT) != 0; } void setMethodHasPatchpoint() { optMethodFlags |= OMF_HAS_PATCHPOINT; } bool doesMethodHavePartialCompilationPatchpoints() { return (optMethodFlags & OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT) != 0; } void setMethodHasPartialCompilationPatchpoint() { optMethodFlags |= OMF_HAS_PARTIAL_COMPILATION_PATCHPOINT; } unsigned optMethodFlags; bool doesMethodHaveNoReturnCalls() { return optNoReturnCallCount > 0; } void setMethodHasNoReturnCalls() { optNoReturnCallCount++; } unsigned optNoReturnCallCount; // Recursion bound controls how far we can go backwards tracking for a SSA value. // No throughput diff was found with backward walk bound between 3-8. 
static const int optEarlyPropRecurBound = 5; enum class optPropKind { OPK_INVALID, OPK_ARRAYLEN, OPK_NULLCHECK }; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTree*> LocalNumberToNullCheckTreeMap; GenTree* getArrayLengthFromAllocation(GenTree* tree DEBUGARG(BasicBlock* block)); GenTree* optPropGetValueRec(unsigned lclNum, unsigned ssaNum, optPropKind valueKind, int walkDepth); GenTree* optPropGetValue(unsigned lclNum, unsigned ssaNum, optPropKind valueKind); GenTree* optEarlyPropRewriteTree(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optDoEarlyPropForBlock(BasicBlock* block); bool optDoEarlyPropForFunc(); void optEarlyProp(); void optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); GenTree* optFindNullCheckToFold(GenTree* tree, LocalNumberToNullCheckTreeMap* nullCheckMap); bool optIsNullCheckFoldingLegal(GenTree* tree, GenTree* nullCheckTree, GenTree** nullCheckParent, Statement** nullCheckStmt); bool optCanMoveNullCheckPastTree(GenTree* tree, unsigned nullCheckLclNum, bool isInsideTry, bool checkSideEffectSummary); #if DEBUG void optCheckFlagsAreSet(unsigned methodFlag, const char* methodFlagStr, unsigned bbFlag, const char* bbFlagStr, GenTree* tree, BasicBlock* basicBlock); #endif // Redundant branch opts // PhaseStatus optRedundantBranches(); bool optRedundantRelop(BasicBlock* const block); bool optRedundantBranch(BasicBlock* const block); bool optJumpThread(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop); bool optReachable(BasicBlock* const fromBlock, BasicBlock* const toBlock, BasicBlock* const excludedBlock); /************************************************************************** * Value/Assertion propagation *************************************************************************/ public: // Data structures for assertion prop BitVecTraits* apTraits; ASSERT_TP apFull; enum optAssertionKind { OAK_INVALID, OAK_EQUAL, OAK_NOT_EQUAL, OAK_SUBRANGE, OAK_NO_THROW, OAK_COUNT }; enum optOp1Kind { O1K_INVALID, O1K_LCLVAR, O1K_ARR_BND, O1K_BOUND_OPER_BND, O1K_BOUND_LOOP_BND, O1K_CONSTANT_LOOP_BND, O1K_CONSTANT_LOOP_BND_UN, O1K_EXACT_TYPE, O1K_SUBTYPE, O1K_VALUE_NUMBER, O1K_COUNT }; enum optOp2Kind { O2K_INVALID, O2K_LCLVAR_COPY, O2K_IND_CNS_INT, O2K_CONST_INT, O2K_CONST_LONG, O2K_CONST_DOUBLE, O2K_ZEROOBJ, O2K_SUBRANGE, O2K_COUNT }; struct AssertionDsc { optAssertionKind assertionKind; struct SsaVar { unsigned lclNum; // assigned to or property of this local var number unsigned ssaNum; }; struct ArrBnd { ValueNum vnIdx; ValueNum vnLen; }; struct AssertionDscOp1 { optOp1Kind kind; // a normal LclVar, or Exact-type or Subtype ValueNum vn; union { SsaVar lcl; ArrBnd bnd; }; } op1; struct AssertionDscOp2 { optOp2Kind kind; // a const or copy assignment ValueNum vn; struct IntVal { ssize_t iconVal; // integer #if !defined(HOST_64BIT) unsigned padding; // unused; ensures iconFlags does not overlap lconVal #endif GenTreeFlags iconFlags; // gtFlags }; union { struct { SsaVar lcl; FieldSeqNode* zeroOffsetFieldSeq; }; IntVal u1; __int64 lconVal; double dconVal; IntegralRange u2; }; } op2; bool IsCheckedBoundArithBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_OPER_BND); } bool IsCheckedBoundBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && op1.kind == O1K_BOUND_LOOP_BND); } bool IsConstantBound() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND)); } 
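// Editor's note (illustrative, not compiler code): the predicates in this group classify an
// assertion by its recorded op1/op2 kinds. For example, a JTRUE on a compare such as "i < 100"
// can produce an OAK_EQUAL/OAK_NOT_EQUAL assertion whose op1.kind is O1K_CONSTANT_LOOP_BND, for
// which IsConstantBound() above returns true.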
bool IsConstantBoundUnsigned() { return ((assertionKind == OAK_EQUAL || assertionKind == OAK_NOT_EQUAL) && (op1.kind == O1K_CONSTANT_LOOP_BND_UN)); } bool IsBoundsCheckNoThrow() { return ((assertionKind == OAK_NO_THROW) && (op1.kind == O1K_ARR_BND)); } bool IsCopyAssertion() { return ((assertionKind == OAK_EQUAL) && (op1.kind == O1K_LCLVAR) && (op2.kind == O2K_LCLVAR_COPY)); } bool IsConstantInt32Assertion() { return ((assertionKind == OAK_EQUAL) || (assertionKind == OAK_NOT_EQUAL)) && (op2.kind == O2K_CONST_INT); } static bool SameKind(AssertionDsc* a1, AssertionDsc* a2) { return a1->assertionKind == a2->assertionKind && a1->op1.kind == a2->op1.kind && a1->op2.kind == a2->op2.kind; } static bool ComplementaryKind(optAssertionKind kind, optAssertionKind kind2) { if (kind == OAK_EQUAL) { return kind2 == OAK_NOT_EQUAL; } else if (kind == OAK_NOT_EQUAL) { return kind2 == OAK_EQUAL; } return false; } bool HasSameOp1(AssertionDsc* that, bool vnBased) { if (op1.kind != that->op1.kind) { return false; } else if (op1.kind == O1K_ARR_BND) { assert(vnBased); return (op1.bnd.vnIdx == that->op1.bnd.vnIdx) && (op1.bnd.vnLen == that->op1.bnd.vnLen); } else { return ((vnBased && (op1.vn == that->op1.vn)) || (!vnBased && (op1.lcl.lclNum == that->op1.lcl.lclNum))); } } bool HasSameOp2(AssertionDsc* that, bool vnBased) { if (op2.kind != that->op2.kind) { return false; } switch (op2.kind) { case O2K_IND_CNS_INT: case O2K_CONST_INT: return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags)); case O2K_CONST_LONG: return (op2.lconVal == that->op2.lconVal); case O2K_CONST_DOUBLE: // exact match because of positive and negative zero. return (memcmp(&op2.dconVal, &that->op2.dconVal, sizeof(double)) == 0); case O2K_ZEROOBJ: return true; case O2K_LCLVAR_COPY: return (op2.lcl.lclNum == that->op2.lcl.lclNum) && (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) && (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq); case O2K_SUBRANGE: return op2.u2.Equals(that->op2.u2); case O2K_INVALID: // we will return false break; default: assert(!"Unexpected value for op2.kind in AssertionDsc."); break; } return false; } bool Complementary(AssertionDsc* that, bool vnBased) { return ComplementaryKind(assertionKind, that->assertionKind) && HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } bool Equals(AssertionDsc* that, bool vnBased) { if (assertionKind != that->assertionKind) { return false; } else if (assertionKind == OAK_NO_THROW) { assert(op2.kind == O2K_INVALID); return HasSameOp1(that, vnBased); } else { return HasSameOp1(that, vnBased) && HasSameOp2(that, vnBased); } } }; protected: static fgWalkPreFn optAddCopiesCallback; static fgWalkPreFn optVNAssertionPropCurStmtVisitor; unsigned optAddCopyLclNum; GenTree* optAddCopyAsgnNode; bool optLocalAssertionProp; // indicates that we are performing local assertion prop bool optAssertionPropagated; // set to true if we modified the trees bool optAssertionPropagatedCurrentStmt; #ifdef DEBUG GenTree* optAssertionPropCurrentTree; #endif AssertionIndex* optComplementaryAssertionMap; JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions // using the value of a local var) for each local var AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments AssertionIndex optAssertionCount; // total number of assertions in the assertion table AssertionIndex optMaxAssertionCount; public: void optVnNonNullPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); 
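// Editor's note (illustrative, not compiler code): the VN-based propagation entry points in this
// group rewrite trees whose values are already proven by value numbering. For instance, if a
// subtree's conservative VN is the constant 5, optVNConstantPropOnTree can replace the subtree
// with a constant node for 5, using optExtractSideEffListFromConst to preserve any side effects
// of the original tree.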
fgWalkResult optVNConstantPropCurStmt(BasicBlock* block, Statement* stmt, GenTree* tree); GenTree* optVNConstantPropOnJTrue(BasicBlock* block, GenTree* test); GenTree* optVNConstantPropOnTree(BasicBlock* block, GenTree* tree); GenTree* optExtractSideEffListFromConst(GenTree* tree); AssertionIndex GetAssertionCount() { return optAssertionCount; } ASSERT_TP* bbJtrueAssertionOut; typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap; ValueNumToAssertsMap* optValueNumToAsserts; // Assertion prop helpers. ASSERT_TP& GetAssertionDep(unsigned lclNum); AssertionDsc* optGetAssertion(AssertionIndex assertIndex); void optAssertionInit(bool isLocalProp); void optAssertionTraitsInit(AssertionIndex assertionCount); void optAssertionReset(AssertionIndex limit); void optAssertionRemove(AssertionIndex index); // Assertion prop data flow functions. void optAssertionPropMain(); Statement* optVNAssertionPropCurStmt(BasicBlock* block, Statement* stmt); bool optIsTreeKnownIntValue(bool vnBased, GenTree* tree, ssize_t* pConstant, GenTreeFlags* pIconFlags); ASSERT_TP* optInitAssertionDataflowFlags(); ASSERT_TP* optComputeAssertionGen(); // Assertion Gen functions. void optAssertionGen(GenTree* tree); AssertionIndex optAssertionGenCast(GenTreeCast* cast); AssertionIndex optAssertionGenPhiDefn(GenTree* tree); AssertionInfo optCreateJTrueBoundsAssertion(GenTree* tree); AssertionInfo optAssertionGenJtrue(GenTree* tree); AssertionIndex optCreateJtrueAssertions(GenTree* op1, GenTree* op2, Compiler::optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFindComplementary(AssertionIndex assertionIndex); void optMapComplementary(AssertionIndex assertionIndex, AssertionIndex index); // Assertion creation functions. AssertionIndex optCreateAssertion(GenTree* op1, GenTree* op2, optAssertionKind assertionKind, bool helperCallArgs = false); AssertionIndex optFinalizeCreatingAssertion(AssertionDsc* assertion); bool optTryExtractSubrangeAssertion(GenTree* source, IntegralRange* pRange); void optCreateComplementaryAssertion(AssertionIndex assertionIndex, GenTree* op1, GenTree* op2, bool helperCallArgs = false); bool optAssertionVnInvolvesNan(AssertionDsc* assertion); AssertionIndex optAddAssertion(AssertionDsc* assertion); void optAddVnAssertionMapping(ValueNum vn, AssertionIndex index); #ifdef DEBUG void optPrintVnAssertionMapping(); #endif ASSERT_TP optGetVnMappedAssertions(ValueNum vn); // Used for respective assertion propagations. AssertionIndex optAssertionIsSubrange(GenTree* tree, IntegralRange range, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsSubtype(GenTree* tree, GenTree* methodTableArg, ASSERT_VALARG_TP assertions); AssertionIndex optAssertionIsNonNullInternal(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased)); bool optAssertionIsNonNull(GenTree* op, ASSERT_VALARG_TP assertions DEBUGARG(bool* pVnBased) DEBUGARG(AssertionIndex* pIndex)); AssertionIndex optGlobalAssertionIsEqualOrNotEqual(ASSERT_VALARG_TP assertions, GenTree* op1, GenTree* op2); AssertionIndex optGlobalAssertionIsEqualOrNotEqualZero(ASSERT_VALARG_TP assertions, GenTree* op1); AssertionIndex optLocalAssertionIsEqualOrNotEqual( optOp1Kind op1Kind, unsigned lclNum, optOp2Kind op2Kind, ssize_t cnsVal, ASSERT_VALARG_TP assertions); // Assertion prop for lcl var functions. 
bool optAssertionProp_LclVarTypeCheck(GenTree* tree, LclVarDsc* lclVarDsc, LclVarDsc* copyVarDsc); GenTree* optCopyAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); GenTree* optConstantAssertionProp(AssertionDsc* curAssertion, GenTreeLclVarCommon* tree, Statement* stmt DEBUGARG(AssertionIndex index)); bool optZeroObjAssertionProp(GenTree* tree, ASSERT_VALARG_TP assertions); // Assertion propagation functions. GenTree* optAssertionProp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt, BasicBlock* block); GenTree* optAssertionProp_LclVar(ASSERT_VALARG_TP assertions, GenTreeLclVarCommon* tree, Statement* stmt); GenTree* optAssertionProp_Asg(ASSERT_VALARG_TP assertions, GenTreeOp* asg, Statement* stmt); GenTree* optAssertionProp_Return(ASSERT_VALARG_TP assertions, GenTreeUnOp* ret, Statement* stmt); GenTree* optAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Cast(ASSERT_VALARG_TP assertions, GenTreeCast* cast, Statement* stmt); GenTree* optAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call, Statement* stmt); GenTree* optAssertionProp_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Comma(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_BndsChk(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropGlobal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionPropLocal_RelOp(ASSERT_VALARG_TP assertions, GenTree* tree, Statement* stmt); GenTree* optAssertionProp_Update(GenTree* newTree, GenTree* tree, Statement* stmt); GenTree* optNonNullAssertionProp_Call(ASSERT_VALARG_TP assertions, GenTreeCall* call); // Implied assertion functions. 
void optImpliedAssertions(AssertionIndex assertionIndex, ASSERT_TP& activeAssertions); void optImpliedByTypeOfAssertions(ASSERT_TP& activeAssertions); void optImpliedByCopyAssertion(AssertionDsc* copyAssertion, AssertionDsc* depAssertion, ASSERT_TP& result); void optImpliedByConstAssertion(AssertionDsc* curAssertion, ASSERT_TP& result); #ifdef DEBUG void optPrintAssertion(AssertionDsc* newAssertion, AssertionIndex assertionIndex = 0); void optPrintAssertionIndex(AssertionIndex index); void optPrintAssertionIndices(ASSERT_TP assertions); void optDebugCheckAssertion(AssertionDsc* assertion); void optDebugCheckAssertions(AssertionIndex AssertionIndex); #endif static void optDumpAssertionIndices(const char* header, ASSERT_TP assertions, const char* footer = nullptr); static void optDumpAssertionIndices(ASSERT_TP assertions, const char* footer = nullptr); void optAddCopies(); /************************************************************************** * Range checks *************************************************************************/ public: struct LoopCloneVisitorInfo { LoopCloneContext* context; unsigned loopNum; Statement* stmt; LoopCloneVisitorInfo(LoopCloneContext* context, unsigned loopNum, Statement* stmt) : context(context), loopNum(loopNum), stmt(nullptr) { } }; bool optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum); bool optExtractArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optReconstructArrIndex(GenTree* tree, ArrIndex* result, unsigned lhsNum); bool optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context); static fgWalkPreFn optCanOptimizeByLoopCloningVisitor; fgWalkResult optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info); bool optObtainLoopCloningOpts(LoopCloneContext* context); bool optIsLoopClonable(unsigned loopInd); bool optLoopCloningEnabled(); #ifdef DEBUG void optDebugLogLoopCloning(BasicBlock* block, Statement* insertBefore); #endif void optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool fastPath)); bool optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context); bool optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext* context); BasicBlock* optInsertLoopChoiceConditions(LoopCloneContext* context, unsigned loopNum, BasicBlock* slowHead, BasicBlock* insertAfter); protected: ssize_t optGetArrayRefScaleAndIndex(GenTree* mul, GenTree** pIndex DEBUGARG(bool bRngChk)); bool optReachWithoutCall(BasicBlock* srcBB, BasicBlock* dstBB); protected: bool optLoopsMarked; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegAlloc XX XX XX XX Does the register allocation and puts the remaining lclVars on the stack XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: regNumber raUpdateRegStateForArg(RegState* regState, LclVarDsc* argDsc); void raMarkStkVars(); #if FEATURE_PARTIAL_SIMD_CALLEE_SAVE #if defined(TARGET_AMD64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); return (type == TYP_SIMD32); } #elif defined(TARGET_ARM64) static bool varTypeNeedsPartialCalleeSave(var_types type) { assert(type != TYP_STRUCT); // ARM64 ABI FP Callee save registers only require Callee to save lower 8 Bytes // For SIMD types longer than 8 bytes Caller is responsible for saving and restoring 
// Upper bytes.
return ((type == TYP_SIMD16) || (type == TYP_SIMD12));
}
#else  // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#error("Unknown target architecture for FEATURE_SIMD")
#endif // !defined(TARGET_AMD64) && !defined(TARGET_ARM64)
#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE

protected:
// Some things are used by both LSRA and regpredict allocators.

FrameType rpFrameType;
bool      rpMustCreateEBPCalled; // Set to true after we have called rpMustCreateEBPFrame once

bool rpMustCreateEBPFrame(INDEBUG(const char** wbReason));

private:
Lowering*            m_pLowering;   // Lowering; needed to Lower IR that's added or modified after Lowering.
LinearScanInterface* m_pLinearScan; // Linear Scan allocator

/* raIsVarargsStackArg is called by raMarkStkVars and by
   lvaComputeRefCounts. It identifies the special case
   where a varargs function has a parameter passed on the
   stack, other than the special varargs handle. Such parameters
   require special treatment, because they cannot be tracked
   by the GC (their offsets in the stack are not known
   at compile time). */
bool raIsVarargsStackArg(unsigned lclNum)
{
#ifdef TARGET_X86
    LclVarDsc* varDsc = lvaGetDesc(lclNum);
    assert(varDsc->lvIsParam);
    return (info.compIsVarArgs && !varDsc->lvIsRegArg && (lclNum != lvaVarargsHandleArg));
#else  // TARGET_X86
    return false;
#endif // TARGET_X86
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           EEInterface                                     XX
XX                                                                           XX
XX   Get to the class and method info from the Execution Engine given       XX
XX   tokens for the class and method                                        XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
// Get handles
void eeGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                   CORINFO_RESOLVED_TOKEN* pConstrainedToken,
                   CORINFO_CALLINFO_FLAGS  flags,
                   CORINFO_CALL_INFO*      pResult);

void eeGetFieldInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS flags, CORINFO_FIELD_INFO* pResult);

// Get the flags
bool eeIsValueClass(CORINFO_CLASS_HANDLE clsHnd);
bool eeIsIntrinsic(CORINFO_METHOD_HANDLE ftn);
bool eeIsFieldStatic(CORINFO_FIELD_HANDLE fldHnd);

var_types eeGetFieldType(CORINFO_FIELD_HANDLE fldHnd, CORINFO_CLASS_HANDLE* pStructHnd = nullptr);

#if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(TRACK_LSRA_STATS)
const char* eeGetMethodName(CORINFO_METHOD_HANDLE hnd, const char** className);
const char* eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd);
unsigned compMethodHash(CORINFO_METHOD_HANDLE methodHandle);
bool eeIsNativeMethod(CORINFO_METHOD_HANDLE method);
CORINFO_METHOD_HANDLE eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method);
#endif

var_types            eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
var_types            eeGetArgType(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig, bool* isPinned);
CORINFO_CLASS_HANDLE eeGetArgClass(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE list);
CORINFO_CLASS_HANDLE eeGetClassFromContext(CORINFO_CONTEXT_HANDLE context);
unsigned             eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* sig);
static unsigned      eeGetArgSizeAlignment(var_types type, bool isFloatHfa);

// VOM info, method sigs
void eeGetSig(unsigned sigTok, CORINFO_MODULE_HANDLE scope, CORINFO_CONTEXT_HANDLE context, CORINFO_SIG_INFO* retSig);

void eeGetCallSiteSig(unsigned               sigTok,
                      CORINFO_MODULE_HANDLE  scope,
                      CORINFO_CONTEXT_HANDLE context,
CORINFO_SIG_INFO* retSig); void eeGetMethodSig(CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* retSig, CORINFO_CLASS_HANDLE owner = nullptr); // Method entry-points, instrs CORINFO_METHOD_HANDLE eeMarkNativeTarget(CORINFO_METHOD_HANDLE method); CORINFO_EE_INFO eeInfo; bool eeInfoInitialized; CORINFO_EE_INFO* eeGetEEInfo(); // Gets the offset of a SDArray's first element static unsigned eeGetArrayDataOffset(); // Get the offset of a MDArray's first element static unsigned eeGetMDArrayDataOffset(unsigned rank); // Get the offset of a MDArray's dimension length for a given dimension. static unsigned eeGetMDArrayLengthOffset(unsigned rank, unsigned dimension); // Get the offset of a MDArray's lower bound for a given dimension. static unsigned eeGetMDArrayLowerBoundOffset(unsigned rank, unsigned dimension); GenTree* eeGetPInvokeCookie(CORINFO_SIG_INFO* szMetaSig); // Returns the page size for the target machine as reported by the EE. target_size_t eeGetPageSize() { return (target_size_t)eeGetEEInfo()->osPageSize; } //------------------------------------------------------------------------ // VirtualStubParam: virtual stub dispatch extra parameter (slot address). // // It represents Abi and target specific registers for the parameter. // class VirtualStubParamInfo { public: VirtualStubParamInfo(bool isCoreRTABI) { #if defined(TARGET_X86) reg = REG_EAX; regMask = RBM_EAX; #elif defined(TARGET_AMD64) if (isCoreRTABI) { reg = REG_R10; regMask = RBM_R10; } else { reg = REG_R11; regMask = RBM_R11; } #elif defined(TARGET_ARM) if (isCoreRTABI) { reg = REG_R12; regMask = RBM_R12; } else { reg = REG_R4; regMask = RBM_R4; } #elif defined(TARGET_ARM64) reg = REG_R11; regMask = RBM_R11; #else #error Unsupported or unset target architecture #endif } regNumber GetReg() const { return reg; } _regMask_enum GetRegMask() const { return regMask; } private: regNumber reg; _regMask_enum regMask; }; VirtualStubParamInfo* virtualStubParamInfo; bool IsTargetAbi(CORINFO_RUNTIME_ABI abi) { return eeGetEEInfo()->targetAbi == abi; } bool generateCFIUnwindCodes() { #if defined(FEATURE_CFI_SUPPORT) return TargetOS::IsUnix && IsTargetAbi(CORINFO_CORERT_ABI); #else return false; #endif } // Debugging support - Line number info void eeGetStmtOffsets(); unsigned eeBoundariesCount; ICorDebugInfo::OffsetMapping* eeBoundaries; // Boundaries to report to the EE void eeSetLIcount(unsigned count); void eeSetLIinfo(unsigned which, UNATIVE_OFFSET offs, IPmappingDscKind kind, const ILLocation& loc); void eeSetLIdone(); #ifdef DEBUG static void eeDispILOffs(IL_OFFSET offs); static void eeDispSourceMappingOffs(uint32_t offs); static void eeDispLineInfo(const ICorDebugInfo::OffsetMapping* line); void eeDispLineInfos(); #endif // DEBUG // Debugging support - Local var info void eeGetVars(); unsigned eeVarsCount; struct VarResultInfo { UNATIVE_OFFSET startOffset; UNATIVE_OFFSET endOffset; DWORD varNumber; CodeGenInterface::siVarLoc loc; } * eeVars; void eeSetLVcount(unsigned count); void eeSetLVinfo(unsigned which, UNATIVE_OFFSET startOffs, UNATIVE_OFFSET length, unsigned varNum, const CodeGenInterface::siVarLoc& loc); void eeSetLVdone(); #ifdef DEBUG void eeDispVar(ICorDebugInfo::NativeVarInfo* var); void eeDispVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo* vars); #endif // DEBUG // ICorJitInfo wrappers void eeReserveUnwindInfo(bool isFunclet, bool isColdCode, ULONG unwindSize); void eeAllocUnwindInfo(BYTE* pHotCode, BYTE* pColdCode, ULONG startOffset, ULONG endOffset, ULONG unwindSize, BYTE* pUnwindBlock, 
CorJitFuncKind funcKind); void eeSetEHcount(unsigned cEH); void eeSetEHinfo(unsigned EHnumber, const CORINFO_EH_CLAUSE* clause); WORD eeGetRelocTypeHint(void* target); // ICorStaticInfo wrapper functions bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken); #if defined(UNIX_AMD64_ABI) #ifdef DEBUG static void dumpSystemVClassificationType(SystemVClassificationType ct); #endif // DEBUG void eeGetSystemVAmd64PassStructInRegisterDescriptor( /*IN*/ CORINFO_CLASS_HANDLE structHnd, /*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr); #endif // UNIX_AMD64_ABI template <typename ParamType> bool eeRunWithErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithErrorTrapImp(void (*function)(void*), void* param); template <typename ParamType> bool eeRunWithSPMIErrorTrap(void (*function)(ParamType*), ParamType* param) { return eeRunWithSPMIErrorTrapImp(reinterpret_cast<void (*)(void*)>(function), reinterpret_cast<void*>(param)); } bool eeRunWithSPMIErrorTrapImp(void (*function)(void*), void* param); // Utility functions const char* eeGetFieldName(CORINFO_FIELD_HANDLE fieldHnd, const char** classNamePtr = nullptr); #if defined(DEBUG) const WCHAR* eeGetCPString(size_t stringHandle); #endif const char* eeGetClassName(CORINFO_CLASS_HANDLE clsHnd); static CORINFO_METHOD_HANDLE eeFindHelper(unsigned helper); static CorInfoHelpFunc eeGetHelperNum(CORINFO_METHOD_HANDLE method); static bool IsSharedStaticHelper(GenTree* tree); static bool IsGcSafePoint(GenTreeCall* call); static CORINFO_FIELD_HANDLE eeFindJitDataOffs(unsigned jitDataOffs); // returns true/false if 'field' is a Jit Data offset static bool eeIsJitDataOffs(CORINFO_FIELD_HANDLE field); // returns a number < 0 if 'field' is not a Jit Data offset, otherwise the data offset (limited to 2GB) static int eeGetJitDataOffs(CORINFO_FIELD_HANDLE field); /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX CodeGenerator XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: CodeGenInterface* codeGen; // Record the instr offset mapping to the generated code jitstd::list<IPmappingDsc> genIPmappings; #ifdef DEBUG jitstd::list<PreciseIPMapping> genPreciseIPmappings; #endif // Managed RetVal - A side hash table meant to record the mapping from a // GT_CALL node to its debug info. This info is used to emit sequence points // that can be used by debugger to determine the native offset at which the // managed RetVal will be available. // // In fact we can store debug info in a GT_CALL node. This was ruled out in // favor of a side table for two reasons: 1) We need debug info for only those // GT_CALL nodes (created during importation) that correspond to an IL call and // whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used // structure and IL offset is needed only when generating debuggable code. Therefore // it is desirable to avoid memory size penalty in retail scenarios. 
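    // Illustrative sketch (hypothetical consumer, not part of this header): code that
    // needs the debug info for a call would query the side table below on demand, e.g.
    //
    //     DebugInfo di;
    //     if (genCallSite2DebugInfoMap->Lookup(callNode, &di))
    //     {
    //         // emit a sequence point for the managed RetVal using 'di'
    //     }
    //
    // so only IL calls with non-void return types ever pay for an entry.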
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, DebugInfo> CallSiteDebugInfoTable; CallSiteDebugInfoTable* genCallSite2DebugInfoMap; unsigned genReturnLocal; // Local number for the return value when applicable. BasicBlock* genReturnBB; // jumped to when not optimizing for speed. // The following properties are part of CodeGenContext. Getters are provided here for // convenience and backward compatibility, but the properties can only be set by invoking // the setter on CodeGenContext directly. emitter* GetEmitter() const { return codeGen->GetEmitter(); } bool isFramePointerUsed() const { return codeGen->isFramePointerUsed(); } bool GetInterruptible() { return codeGen->GetInterruptible(); } void SetInterruptible(bool value) { codeGen->SetInterruptible(value); } #if DOUBLE_ALIGN const bool genDoubleAlign() { return codeGen->doDoubleAlign(); } DWORD getCanDoubleAlign(); bool shouldDoubleAlign(unsigned refCntStk, unsigned refCntReg, weight_t refCntWtdReg, unsigned refCntStkParam, weight_t refCntWtdStkDbl); #endif // DOUBLE_ALIGN bool IsFullPtrRegMapRequired() { return codeGen->IsFullPtrRegMapRequired(); } void SetFullPtrRegMapRequired(bool value) { codeGen->SetFullPtrRegMapRequired(value); } // Things that MAY belong either in CodeGen or CodeGenContext #if defined(FEATURE_EH_FUNCLETS) FuncInfoDsc* compFuncInfos; unsigned short compCurrFuncIdx; unsigned short compFuncInfoCount; unsigned short compFuncCount() { assert(fgFuncletsCreated); return compFuncInfoCount; } #else // !FEATURE_EH_FUNCLETS // This is a no-op when there are no funclets! void genUpdateCurrentFunclet(BasicBlock* block) { return; } FuncInfoDsc compFuncInfoRoot; static const unsigned compCurrFuncIdx = 0; unsigned short compFuncCount() { return 1; } #endif // !FEATURE_EH_FUNCLETS FuncInfoDsc* funCurrentFunc(); void funSetCurrentFunc(unsigned funcIdx); FuncInfoDsc* funGetFunc(unsigned funcIdx); unsigned int funGetFuncIdx(BasicBlock* block); // LIVENESS VARSET_TP compCurLife; // current live variables GenTree* compCurLifeTree; // node after which compCurLife has been computed // Compare the given "newLife" with last set of live variables and update // codeGen "gcInfo", siScopes, "regSet" with the new variable's homes/liveness. template <bool ForCodeGen> void compChangeLife(VARSET_VALARG_TP newLife); // Update the GC's masks, register's masks and reports change on variable's homes given a set of // current live variables if changes have happened since "compCurLife". template <bool ForCodeGen> inline void compUpdateLife(VARSET_VALARG_TP newLife); // Gets a register mask that represent the kill set for a helper call since // not all JIT Helper calls follow the standard ABI on the target architecture. regMaskTP compHelperCallKillSet(CorInfoHelpFunc helper); #ifdef TARGET_ARM // Requires that "varDsc" be a promoted struct local variable being passed as an argument, beginning at // "firstArgRegNum", which is assumed to have already been aligned to the register alignment restriction of the // struct type. Adds bits to "*pArgSkippedRegMask" for any argument registers *not* used in passing "varDsc" -- // i.e., internal "holes" caused by internal alignment constraints. For example, if the struct contained an int and // a double, and we at R0 (on ARM), then R1 would be skipped, and the bit for R1 would be added to the mask. 
    void fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask);
#endif // TARGET_ARM

    // If "tree" is an indirection (GT_IND, or GT_OBJ) whose arg is an ADDR, whose arg is a LCL_VAR, return that
    // LCL_VAR node, else NULL.
    static GenTreeLclVar* fgIsIndirOfAddrOfLocal(GenTree* tree);

    // This map is indexed by GT_OBJ nodes that are addresses of promoted struct variables, which
    // have been annotated with the GTF_VAR_DEATH flag. If such a node is *not* mapped in this
    // table, one may assume that all the (tracked) field vars die at this GT_OBJ. Otherwise,
    // the node maps to a pointer to a VARSET_TP, containing set bits for each of the tracked field
    // vars of the promoted struct local that go dead at the given node (the set bits are the bits
    // for the tracked var indices of the field vars, as in a live var set).
    //
    // The map is allocated on demand so all map operations should use one of the following three
    // wrapper methods.

    NodeToVarsetPtrMap* m_promotedStructDeathVars;

    NodeToVarsetPtrMap* GetPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars == nullptr)
        {
            m_promotedStructDeathVars = new (getAllocator()) NodeToVarsetPtrMap(getAllocator());
        }
        return m_promotedStructDeathVars;
    }

    void ClearPromotedStructDeathVars()
    {
        if (m_promotedStructDeathVars != nullptr)
        {
            m_promotedStructDeathVars->RemoveAll();
        }
    }

    bool LookupPromotedStructDeathVars(GenTree* tree, VARSET_TP** bits)
    {
        *bits       = nullptr;
        bool result = false;

        if (m_promotedStructDeathVars != nullptr)
        {
            result = m_promotedStructDeathVars->Lookup(tree, bits);
        }

        return result;
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           UnwindInfo                                      XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#if !defined(__GNUC__)
#pragma region Unwind information
#endif

public:
    //
    // Infrastructure functions: start/stop/reserve/emit.
    //

    void unwindBegProlog();
    void unwindEndProlog();
    void unwindBegEpilog();
    void unwindEndEpilog();
    void unwindReserve();
    void unwindEmit(void* pHotCode, void* pColdCode);

    //
    // Specific unwind information functions: called by code generation to indicate a particular
    // prolog or epilog unwindable instruction has been generated.
    //

    void unwindPush(regNumber reg);
    void unwindAllocStack(unsigned size);
    void unwindSetFrameReg(regNumber reg, unsigned offset);
    void unwindSaveReg(regNumber reg, unsigned offset);

#if defined(TARGET_ARM)
    void unwindPushMaskInt(regMaskTP mask);
    void unwindPushMaskFloat(regMaskTP mask);
    void unwindPopMaskInt(regMaskTP mask);
    void unwindPopMaskFloat(regMaskTP mask);
    void unwindBranch16();                    // The epilog terminates with a 16-bit branch (e.g., "bx lr")
    void unwindNop(unsigned codeSizeInBytes); // Generate unwind NOP code. 'codeSizeInBytes' is 2 or 4 bytes. Only
                                              // called via unwindPadding().
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
#endif // TARGET_ARM

#if defined(TARGET_ARM64)
    void unwindNop();
    void unwindPadding(); // Generate a sequence of unwind NOP codes representing instructions between the last
                          // instruction and the current location.
    void unwindSaveReg(regNumber reg, int offset);           // str reg, [sp, #offset]
    void unwindSaveRegPreindexed(regNumber reg, int offset); // str reg, [sp, #offset]!
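    // Illustrative sketch (hypothetical ARM64 prolog, not part of this header): codegen is
    // expected to pair each unwindable prolog instruction with one of these callbacks, e.g.
    //
    //     stp fp, lr, [sp, #-16]!   ->  unwindSaveRegPairPreindexed(REG_FP, REG_LR, -16);
    //     mov fp, sp                ->  unwindSetFrameReg(REG_FPBASE, 0);
    //     str x19, [sp, #8]         ->  unwindSaveReg(REG_R19, 8);
    //
    // so that the OS unwinder can replay the frame setup when walking the stack.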
void unwindSaveRegPair(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset] void unwindSaveRegPairPreindexed(regNumber reg1, regNumber reg2, int offset); // stp reg1, reg2, [sp, #offset]! void unwindSaveNext(); // unwind code: save_next void unwindReturn(regNumber reg); // ret lr #endif // defined(TARGET_ARM64) // // Private "helper" functions for the unwind implementation. // private: #if defined(FEATURE_EH_FUNCLETS) void unwindGetFuncLocations(FuncInfoDsc* func, bool getHotSectionData, /* OUT */ emitLocation** ppStartLoc, /* OUT */ emitLocation** ppEndLoc); #endif // FEATURE_EH_FUNCLETS void unwindReserveFunc(FuncInfoDsc* func); void unwindEmitFunc(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #if defined(TARGET_AMD64) || (defined(TARGET_X86) && defined(FEATURE_EH_FUNCLETS)) void unwindReserveFuncHelper(FuncInfoDsc* func, bool isHotCode); void unwindEmitFuncHelper(FuncInfoDsc* func, void* pHotCode, void* pColdCode, bool isHotCode); #endif // TARGET_AMD64 || (TARGET_X86 && FEATURE_EH_FUNCLETS) UNATIVE_OFFSET unwindGetCurrentOffset(FuncInfoDsc* func); #if defined(TARGET_AMD64) void unwindBegPrologWindows(); void unwindPushWindows(regNumber reg); void unwindAllocStackWindows(unsigned size); void unwindSetFrameRegWindows(regNumber reg, unsigned offset); void unwindSaveRegWindows(regNumber reg, unsigned offset); #ifdef UNIX_AMD64_ABI void unwindSaveRegCFI(regNumber reg, unsigned offset); #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM) void unwindPushPopMaskInt(regMaskTP mask, bool useOpsize16); void unwindPushPopMaskFloat(regMaskTP mask); #endif // TARGET_ARM #if defined(FEATURE_CFI_SUPPORT) short mapRegNumToDwarfReg(regNumber reg); void createCfiCode(FuncInfoDsc* func, UNATIVE_OFFSET codeOffset, UCHAR opcode, short dwarfReg, INT offset = 0); void unwindPushPopCFI(regNumber reg); void unwindBegPrologCFI(); void unwindPushPopMaskCFI(regMaskTP regMask, bool isFloat); void unwindAllocStackCFI(unsigned size); void unwindSetFrameRegCFI(regNumber reg, unsigned offset); void unwindEmitFuncCFI(FuncInfoDsc* func, void* pHotCode, void* pColdCode); #ifdef DEBUG void DumpCfiInfo(bool isHotCode, UNATIVE_OFFSET startOffset, UNATIVE_OFFSET endOffset, DWORD cfiCodeBytes, const CFI_CODE* const pCfiCode); #endif #endif // FEATURE_CFI_SUPPORT #if !defined(__GNUC__) #pragma endregion // Note: region is NOT under !defined(__GNUC__) #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX SIMD XX XX XX XX Info about SIMD types, methods and the SIMD assembly (i.e. the assembly XX XX that contains the distinguished, well-known SIMD type definitions). 
XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool IsBaselineSimdIsaSupported() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compOpportunisticallyDependsOn(minimumIsa); #else return false; #endif } #if defined(DEBUG) bool IsBaselineSimdIsaSupportedDebugOnly() { #ifdef FEATURE_SIMD #if defined(TARGET_XARCH) CORINFO_InstructionSet minimumIsa = InstructionSet_SSE2; #elif defined(TARGET_ARM64) CORINFO_InstructionSet minimumIsa = InstructionSet_AdvSimd; #else #error Unsupported platform #endif // !TARGET_XARCH && !TARGET_ARM64 return compIsaSupportedDebugOnly(minimumIsa); #else return false; #endif // FEATURE_SIMD } #endif // DEBUG // Get highest available level for SIMD codegen SIMDLevel getSIMDSupportLevel() { #if defined(TARGET_XARCH) if (compOpportunisticallyDependsOn(InstructionSet_AVX2)) { return SIMD_AVX2_Supported; } if (compOpportunisticallyDependsOn(InstructionSet_SSE42)) { return SIMD_SSE4_Supported; } // min bar is SSE2 return SIMD_SSE2_Supported; #else assert(!"Available instruction set(s) for SIMD codegen is not defined for target arch"); unreached(); return SIMD_Not_Supported; #endif } bool isIntrinsicType(CORINFO_CLASS_HANDLE clsHnd) { return info.compCompHnd->isIntrinsicType(clsHnd); } const char* getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName) { return info.compCompHnd->getClassNameFromMetadata(cls, namespaceName); } CORINFO_CLASS_HANDLE getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index) { return info.compCompHnd->getTypeInstantiationArgument(cls, index); } #ifdef FEATURE_SIMD #ifndef TARGET_ARM64 // Should we support SIMD intrinsics? bool featureSIMD; #endif // Should we recognize SIMD types? // We always do this on ARM64 to support HVA types. bool supportSIMDTypes() { #ifdef TARGET_ARM64 return true; #else return featureSIMD; #endif } // Have we identified any SIMD types? // This is currently used by struct promotion to avoid getting type information for a struct // field to see if it is a SIMD type, if we haven't seen any SIMD types or operations in // the method. bool _usesSIMDTypes; bool usesSIMDTypes() { return _usesSIMDTypes; } void setUsesSIMDTypes(bool value) { _usesSIMDTypes = value; } // This is a temp lclVar allocated on the stack as TYP_SIMD. It is used to implement intrinsics // that require indexed access to the individual fields of the vector, which is not well supported // by the hardware. It is allocated when/if such situations are encountered during Lowering. 
unsigned lvaSIMDInitTempVarNum; struct SIMDHandlesCache { // SIMD Types CORINFO_CLASS_HANDLE SIMDFloatHandle; CORINFO_CLASS_HANDLE SIMDDoubleHandle; CORINFO_CLASS_HANDLE SIMDIntHandle; CORINFO_CLASS_HANDLE SIMDUShortHandle; CORINFO_CLASS_HANDLE SIMDUByteHandle; CORINFO_CLASS_HANDLE SIMDShortHandle; CORINFO_CLASS_HANDLE SIMDByteHandle; CORINFO_CLASS_HANDLE SIMDLongHandle; CORINFO_CLASS_HANDLE SIMDUIntHandle; CORINFO_CLASS_HANDLE SIMDULongHandle; CORINFO_CLASS_HANDLE SIMDNIntHandle; CORINFO_CLASS_HANDLE SIMDNUIntHandle; CORINFO_CLASS_HANDLE SIMDVector2Handle; CORINFO_CLASS_HANDLE SIMDVector3Handle; CORINFO_CLASS_HANDLE SIMDVector4Handle; CORINFO_CLASS_HANDLE SIMDVectorHandle; #ifdef FEATURE_HW_INTRINSICS #if defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector64FloatHandle; CORINFO_CLASS_HANDLE Vector64DoubleHandle; CORINFO_CLASS_HANDLE Vector64IntHandle; CORINFO_CLASS_HANDLE Vector64UShortHandle; CORINFO_CLASS_HANDLE Vector64UByteHandle; CORINFO_CLASS_HANDLE Vector64ShortHandle; CORINFO_CLASS_HANDLE Vector64ByteHandle; CORINFO_CLASS_HANDLE Vector64LongHandle; CORINFO_CLASS_HANDLE Vector64UIntHandle; CORINFO_CLASS_HANDLE Vector64ULongHandle; CORINFO_CLASS_HANDLE Vector64NIntHandle; CORINFO_CLASS_HANDLE Vector64NUIntHandle; #endif // defined(TARGET_ARM64) CORINFO_CLASS_HANDLE Vector128FloatHandle; CORINFO_CLASS_HANDLE Vector128DoubleHandle; CORINFO_CLASS_HANDLE Vector128IntHandle; CORINFO_CLASS_HANDLE Vector128UShortHandle; CORINFO_CLASS_HANDLE Vector128UByteHandle; CORINFO_CLASS_HANDLE Vector128ShortHandle; CORINFO_CLASS_HANDLE Vector128ByteHandle; CORINFO_CLASS_HANDLE Vector128LongHandle; CORINFO_CLASS_HANDLE Vector128UIntHandle; CORINFO_CLASS_HANDLE Vector128ULongHandle; CORINFO_CLASS_HANDLE Vector128NIntHandle; CORINFO_CLASS_HANDLE Vector128NUIntHandle; #if defined(TARGET_XARCH) CORINFO_CLASS_HANDLE Vector256FloatHandle; CORINFO_CLASS_HANDLE Vector256DoubleHandle; CORINFO_CLASS_HANDLE Vector256IntHandle; CORINFO_CLASS_HANDLE Vector256UShortHandle; CORINFO_CLASS_HANDLE Vector256UByteHandle; CORINFO_CLASS_HANDLE Vector256ShortHandle; CORINFO_CLASS_HANDLE Vector256ByteHandle; CORINFO_CLASS_HANDLE Vector256LongHandle; CORINFO_CLASS_HANDLE Vector256UIntHandle; CORINFO_CLASS_HANDLE Vector256ULongHandle; CORINFO_CLASS_HANDLE Vector256NIntHandle; CORINFO_CLASS_HANDLE Vector256NUIntHandle; #endif // defined(TARGET_XARCH) #endif // FEATURE_HW_INTRINSICS SIMDHandlesCache() { memset(this, 0, sizeof(*this)); } }; SIMDHandlesCache* m_simdHandleCache; // Get an appropriate "zero" for the given type and class handle. GenTree* gtGetSIMDZero(var_types simdType, CorInfoType simdBaseJitType, CORINFO_CLASS_HANDLE simdHandle); // Get the handle for a SIMD type. CORINFO_CLASS_HANDLE gtGetStructHandleForSIMD(var_types simdType, CorInfoType simdBaseJitType) { if (m_simdHandleCache == nullptr) { // This may happen if the JIT generates SIMD node on its own, without importing them. // Otherwise getBaseJitTypeAndSizeOfSIMDType should have created the cache. 
            return NO_CLASS_HANDLE;
        }

        if (simdBaseJitType == CORINFO_TYPE_FLOAT)
        {
            switch (simdType)
            {
                case TYP_SIMD8:
                    return m_simdHandleCache->SIMDVector2Handle;
                case TYP_SIMD12:
                    return m_simdHandleCache->SIMDVector3Handle;
                case TYP_SIMD16:
                    if ((getSIMDVectorType() == TYP_SIMD32) ||
                        (m_simdHandleCache->SIMDVector4Handle != NO_CLASS_HANDLE))
                    {
                        return m_simdHandleCache->SIMDVector4Handle;
                    }
                    break;
                case TYP_SIMD32:
                    break;
                default:
                    unreached();
            }
        }

        assert(emitTypeSize(simdType) <= largestEnregisterableStructSize());
        switch (simdBaseJitType)
        {
            case CORINFO_TYPE_FLOAT:
                return m_simdHandleCache->SIMDFloatHandle;
            case CORINFO_TYPE_DOUBLE:
                return m_simdHandleCache->SIMDDoubleHandle;
            case CORINFO_TYPE_INT:
                return m_simdHandleCache->SIMDIntHandle;
            case CORINFO_TYPE_USHORT:
                return m_simdHandleCache->SIMDUShortHandle;
            case CORINFO_TYPE_UBYTE:
                return m_simdHandleCache->SIMDUByteHandle;
            case CORINFO_TYPE_SHORT:
                return m_simdHandleCache->SIMDShortHandle;
            case CORINFO_TYPE_BYTE:
                return m_simdHandleCache->SIMDByteHandle;
            case CORINFO_TYPE_LONG:
                return m_simdHandleCache->SIMDLongHandle;
            case CORINFO_TYPE_UINT:
                return m_simdHandleCache->SIMDUIntHandle;
            case CORINFO_TYPE_ULONG:
                return m_simdHandleCache->SIMDULongHandle;
            case CORINFO_TYPE_NATIVEINT:
                return m_simdHandleCache->SIMDNIntHandle;
            case CORINFO_TYPE_NATIVEUINT:
                return m_simdHandleCache->SIMDNUIntHandle;
            default:
                assert(!"Didn't find a class handle for simdType");
        }
        return NO_CLASS_HANDLE;
    }

    // Returns true if this is a SIMD type that should be considered an opaque
    // vector type (i.e. do not analyze or promote its fields).
    // Note that all but the fixed vector types are opaque, even though they may
    // actually be declared as having fields.
    bool isOpaqueSIMDType(CORINFO_CLASS_HANDLE structHandle) const
    {
        return ((m_simdHandleCache != nullptr) && (structHandle != m_simdHandleCache->SIMDVector2Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector3Handle) &&
                (structHandle != m_simdHandleCache->SIMDVector4Handle));
    }

    // Returns true if the tree corresponds to a TYP_SIMD lcl var.
    // Note that both SIMD vector args and locals are marked as lvSIMDType = true, but the
    // type of an arg node is TYP_BYREF while that of a local node is TYP_SIMD or TYP_STRUCT.
    bool isSIMDTypeLocal(GenTree* tree)
    {
        return tree->OperIsLocal() && lvaGetDesc(tree->AsLclVarCommon())->lvSIMDType;
    }

    // Returns true if the lclVar is an opaque SIMD type.
    bool isOpaqueSIMDLclVar(const LclVarDsc* varDsc) const
    {
        if (!varDsc->lvSIMDType)
        {
            return false;
        }
        return isOpaqueSIMDType(varDsc->GetStructHnd());
    }

    static bool isRelOpSIMDIntrinsic(SIMDIntrinsicID intrinsicId)
    {
        return (intrinsicId == SIMDIntrinsicEqual);
    }

    // Returns base JIT type of a TYP_SIMD local.
    // Returns CORINFO_TYPE_UNDEF if the local is not TYP_SIMD.
CorInfoType getBaseJitTypeOfSIMDLocal(GenTree* tree) { if (isSIMDTypeLocal(tree)) { return lvaGetDesc(tree->AsLclVarCommon())->GetSimdBaseJitType(); } return CORINFO_TYPE_UNDEF; } bool isSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Numerics") == 0; } return false; } bool isSIMDClass(typeInfo* pTypeInfo) { return pTypeInfo->IsStruct() && isSIMDClass(pTypeInfo->GetClassHandleForValueClass()); } bool isHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { #ifdef FEATURE_HW_INTRINSICS if (isIntrinsicType(clsHnd)) { const char* namespaceName = nullptr; (void)getClassNameFromMetadata(clsHnd, &namespaceName); return strcmp(namespaceName, "System.Runtime.Intrinsics") == 0; } #endif // FEATURE_HW_INTRINSICS return false; } bool isHWSIMDClass(typeInfo* pTypeInfo) { #ifdef FEATURE_HW_INTRINSICS return pTypeInfo->IsStruct() && isHWSIMDClass(pTypeInfo->GetClassHandleForValueClass()); #else return false; #endif } bool isSIMDorHWSIMDClass(CORINFO_CLASS_HANDLE clsHnd) { return isSIMDClass(clsHnd) || isHWSIMDClass(clsHnd); } bool isSIMDorHWSIMDClass(typeInfo* pTypeInfo) { return isSIMDClass(pTypeInfo) || isHWSIMDClass(pTypeInfo); } // Get the base (element) type and size in bytes for a SIMD type. Returns CORINFO_TYPE_UNDEF // if it is not a SIMD type or is an unsupported base JIT type. CorInfoType getBaseJitTypeAndSizeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd, unsigned* sizeBytes = nullptr); CorInfoType getBaseJitTypeOfSIMDType(CORINFO_CLASS_HANDLE typeHnd) { return getBaseJitTypeAndSizeOfSIMDType(typeHnd, nullptr); } // Get SIMD Intrinsic info given the method handle. // Also sets typeHnd, argCount, baseType and sizeBytes out params. const SIMDIntrinsicInfo* getSIMDIntrinsicInfo(CORINFO_CLASS_HANDLE* typeHnd, CORINFO_METHOD_HANDLE methodHnd, CORINFO_SIG_INFO* sig, bool isNewObj, unsigned* argCount, CorInfoType* simdBaseJitType, unsigned* sizeBytes); // Pops and returns GenTree node from importers type stack. // Normalizes TYP_STRUCT value in case of GT_CALL, GT_RET_EXPR and arg nodes. GenTree* impSIMDPopStack(var_types type, bool expectAddr = false, CORINFO_CLASS_HANDLE structType = nullptr); // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain given relop result. SIMDIntrinsicID impSIMDRelOp(SIMDIntrinsicID relOpIntrinsicId, CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, CorInfoType* inOutBaseJitType, GenTree** op1, GenTree** op2); #if defined(TARGET_XARCH) // Transforms operands and returns the SIMD intrinsic to be applied on // transformed operands to obtain == comparison result. SIMDIntrinsicID impSIMDLongRelOpEqual(CORINFO_CLASS_HANDLE typeHnd, unsigned simdVectorSize, GenTree** op1, GenTree** op2); #endif // defined(TARGET_XARCH) void setLclRelatedToSIMDIntrinsic(GenTree* tree); bool areFieldsContiguous(GenTree* op1, GenTree* op2); bool areLocalFieldsContiguous(GenTreeLclFld* first, GenTreeLclFld* second); bool areArrayElementsContiguous(GenTree* op1, GenTree* op2); bool areArgumentsContiguous(GenTree* op1, GenTree* op2); GenTree* createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize); // check methodHnd to see if it is a SIMD method that is expanded as an intrinsic in the JIT. 
    GenTree* impSIMDIntrinsic(OPCODE opcode,
                              GenTree* newobjThis,
                              CORINFO_CLASS_HANDLE clsHnd,
                              CORINFO_METHOD_HANDLE method,
                              CORINFO_SIG_INFO* sig,
                              unsigned methodFlags,
                              int memberRef);

    GenTree* getOp1ForConstructor(OPCODE opcode, GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd);

    // Whether SIMD vector occupies part of SIMD register.
    // SSE2: vector2f/3f are considered sub register SIMD types.
    // AVX: vector2f, 3f and 4f are all considered sub register SIMD types.
    bool isSubRegisterSIMDType(GenTreeSIMD* simdNode)
    {
        unsigned vectorRegisterByteLength;
#if defined(TARGET_XARCH)
        // Calling the getSIMDVectorRegisterByteLength api causes the size of Vector<T> to be recorded
        // with the AOT compiler, so that it cannot change from aot compilation time to runtime.
        // This api does not require such fixing as it merely pertains to the size of the simd type
        // relative to the Vector<T> size as used at compile time. (So detecting a vector length of 16 here
        // does not preclude the code from being used on a machine with a larger vector length.)
        if (getSIMDSupportLevel() < SIMD_AVX2_Supported)
        {
            vectorRegisterByteLength = 16;
        }
        else
        {
            vectorRegisterByteLength = 32;
        }
#else
        vectorRegisterByteLength = getSIMDVectorRegisterByteLength();
#endif
        return (simdNode->GetSimdSize() < vectorRegisterByteLength);
    }

    // Get the type for the hardware SIMD vector.
    // This is the maximum SIMD type supported for this target.
    var_types getSIMDVectorType()
    {
#if defined(TARGET_XARCH)
        if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
        {
            return TYP_SIMD32;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return TYP_SIMD16;
        }
#elif defined(TARGET_ARM64)
        return TYP_SIMD16;
#else
        assert(!"getSIMDVectorType() unimplemented on target arch");
        unreached();
#endif
    }

    // Get the size of the SIMD type in bytes
    int getSIMDTypeSizeInBytes(CORINFO_CLASS_HANDLE typeHnd)
    {
        unsigned sizeBytes = 0;
        (void)getBaseJitTypeAndSizeOfSIMDType(typeHnd, &sizeBytes);
        return sizeBytes;
    }

    // Get the number of elements of baseType of SIMD vector given by its size and baseType
    static int getSIMDVectorLength(unsigned simdSize, var_types baseType);

    // Get the number of elements of baseType of SIMD vector given by its type handle
    int getSIMDVectorLength(CORINFO_CLASS_HANDLE typeHnd);

    // Get preferred alignment of SIMD type.
    int getSIMDTypeAlignment(var_types simdType);

    // Get the number of bytes in a System.Numeric.Vector<T> for the current compilation.
    // Note - cannot be used for System.Runtime.Intrinsic
    unsigned getSIMDVectorRegisterByteLength()
    {
#if defined(TARGET_XARCH)
        if (getSIMDSupportLevel() == SIMD_AVX2_Supported)
        {
            return YMM_REGSIZE_BYTES;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return XMM_REGSIZE_BYTES;
        }
#elif defined(TARGET_ARM64)
        return FP_REGSIZE_BYTES;
#else
        assert(!"getSIMDVectorRegisterByteLength() unimplemented on target arch");
        unreached();
#endif
    }

    // The minimum and maximum possible number of bytes in a SIMD vector.
    // maxSIMDStructBytes
    // The maximum SIMD size supported by System.Numeric.Vectors or System.Runtime.Intrinsic
    // SSE:  16-byte Vector<T> and Vector128<T>
    // AVX:  32-byte Vector256<T> (Vector<T> is 16-byte)
    // AVX2: 32-byte Vector<T> and Vector256<T>
    unsigned int maxSIMDStructBytes()
    {
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (compOpportunisticallyDependsOn(InstructionSet_AVX))
        {
            return YMM_REGSIZE_BYTES;
        }
        else
        {
            // Verify and record that AVX2 isn't supported
            compVerifyInstructionSetUnusable(InstructionSet_AVX2);
            assert(getSIMDSupportLevel() >= SIMD_SSE2_Supported);
            return XMM_REGSIZE_BYTES;
        }
#else
        return getSIMDVectorRegisterByteLength();
#endif
    }

    unsigned int minSIMDStructBytes()
    {
        return emitTypeSize(TYP_SIMD8);
    }

public:
    // Returns the codegen type for a given SIMD size.
    static var_types getSIMDTypeForSize(unsigned size)
    {
        var_types simdType = TYP_UNDEF;
        if (size == 8)
        {
            simdType = TYP_SIMD8;
        }
        else if (size == 12)
        {
            simdType = TYP_SIMD12;
        }
        else if (size == 16)
        {
            simdType = TYP_SIMD16;
        }
        else if (size == 32)
        {
            simdType = TYP_SIMD32;
        }
        else
        {
            noway_assert(!"Unexpected size for SIMD type");
        }
        return simdType;
    }

private:
    unsigned getSIMDInitTempVarNum(var_types simdType);

#else  // !FEATURE_SIMD
    bool isOpaqueSIMDLclVar(LclVarDsc* varDsc)
    {
        return false;
    }
#endif // FEATURE_SIMD

public:
    //------------------------------------------------------------------------
    // largestEnregisterableStruct: The size in bytes of the largest struct that can be enregistered.
    //
    // Notes: It is not guaranteed that the struct of this size or smaller WILL be a
    //        candidate for enregistration.
    unsigned largestEnregisterableStructSize()
    {
#ifdef FEATURE_SIMD
#if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        if (opts.IsReadyToRun())
        {
            // Return a constant instead of maxSIMDStructBytes: maxSIMDStructBytes performs
            // checks that are affected by the current level of instruction set support, which
            // would otherwise cause the highest level of instruction set support to be reported
            // to crossgen2; and this api is only ever used as an optimization or assert, so no
            // reporting should ever happen.
            return YMM_REGSIZE_BYTES;
        }
#endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH)
        unsigned vectorRegSize = maxSIMDStructBytes();
        assert(vectorRegSize >= TARGET_POINTER_SIZE);
        return vectorRegSize;
#else  // !FEATURE_SIMD
        return TARGET_POINTER_SIZE;
#endif // !FEATURE_SIMD
    }

    // Use to determine if a struct *might* be a SIMD type. As this function only takes a size, many
    // structs will fit the criteria.
    bool structSizeMightRepresentSIMDType(size_t structSize)
    {
#ifdef FEATURE_SIMD
        // Do not use maxSIMDStructBytes as that api in R2R on X86 and X64 may notify the JIT
        // about the size of a struct under the assumption that the struct size needs to be recorded.
        // By using largestEnregisterableStructSize here, the detail of whether or not Vector256<T> is
        // enregistered or not will not be messaged to the R2R compiler.
        return (structSize >= minSIMDStructBytes()) && (structSize <= largestEnregisterableStructSize());
#else
        return false;
#endif // FEATURE_SIMD
    }

#ifdef FEATURE_SIMD
    static bool vnEncodesResultTypeForSIMDIntrinsic(SIMDIntrinsicID intrinsicId);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    static bool vnEncodesResultTypeForHWIntrinsic(NamedIntrinsic hwIntrinsicID);
#endif // FEATURE_HW_INTRINSICS

private:
    // These routines need not be enclosed under FEATURE_SIMD since lvIsSIMDType()
    // is defined for both FEATURE_SIMD and !FEATURE_SIMD appropriately. The use
    // of these routines also avoids the need for #ifdef FEATURE_SIMD specific code.

    // Is this var of type SIMD struct?
    bool lclVarIsSIMDType(unsigned varNum)
    {
        return lvaGetDesc(varNum)->lvIsSIMDType();
    }

    // Is this Local node a SIMD local?
    bool lclVarIsSIMDType(GenTreeLclVarCommon* lclVarTree)
    {
        return lclVarIsSIMDType(lclVarTree->GetLclNum());
    }

    // Returns true if the TYP_SIMD locals on stack are aligned at their
    // preferred byte boundary specified by getSIMDTypeAlignment().
    //
    // As per the Intel manual, the preferred alignment for AVX vectors is
    // 32-bytes. It is not clear whether additional stack space used in
    // aligning the stack is worth the benefit, and for now we will use 16-byte
    // alignment for AVX 256-bit vectors with unaligned load/stores to/from
    // memory. On x86, the stack frame is aligned to 4 bytes. We need to extend
    // existing support for double (8-byte) alignment to 16 or 32 byte
    // alignment for frames with local SIMD vars, if that is determined to be
    // profitable.
    //
    // On Amd64 and SysV, RSP+8 is aligned on entry to the function (before
    // prolog has run). This means that in RBP-based frames RBP will be 16-byte
    // aligned. For RSP-based frames these are only sometimes aligned, depending
    // on the frame size.
    //
    bool isSIMDTypeLocalAligned(unsigned varNum)
    {
#if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES
        if (lclVarIsSIMDType(varNum) && lvaTable[varNum].lvType != TYP_BYREF)
        {
            // TODO-Cleanup: Can't this use the lvExactSize on the varDsc?
            int alignment = getSIMDTypeAlignment(lvaTable[varNum].lvType);
            if (alignment <= STACK_ALIGN)
            {
                bool rbpBased;
                int  off = lvaFrameAddress(varNum, &rbpBased);
                // On SysV and Winx64 ABIs RSP+8 will be 16-byte aligned at the
                // first instruction of a function. If our frame is RBP based
                // then RBP will always be 16 bytes aligned, so we can simply
                // check the offset.
                if (rbpBased)
                {
                    return (off % alignment) == 0;
                }

                // For RSP-based frame the alignment of RSP depends on our
                // locals. rsp+8 is aligned on entry and we just subtract frame
                // size so it is not hard to compute. Note that the compiler
                // tries hard to make sure the frame size means RSP will be
                // 16-byte aligned, but for leaf functions without locals (i.e.
                // frameSize = 0) it will not be.
                int frameSize = codeGen->genTotalFrameSize();
                return ((8 - frameSize + off) % alignment) == 0;
            }
        }
#endif // FEATURE_SIMD
        return false;
    }

#ifdef DEBUG
    // Answer the question: Is a particular ISA supported?
    // Use this api when asking the question so that future
    // ISA questions can be asked correctly or when asserting
    // support/nonsupport for an instruction set
    bool compIsaSupportedDebugOnly(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        return (opts.compSupportsISA & (1ULL << isa)) != 0;
#else
        return false;
#endif
    }
#endif // DEBUG

    bool notifyInstructionSetUsage(CORINFO_InstructionSet isa, bool supported) const;

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will exactly match the target machine
    // on which the function is executed (except for CoreLib, where there are special rules)
    bool compExactlyDependsOn(CORINFO_InstructionSet isa) const
    {
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        uint64_t isaBit = (1ULL << isa);
        if ((opts.compSupportsISAReported & isaBit) == 0)
        {
            if (notifyInstructionSetUsage(isa, (opts.compSupportsISA & isaBit) != 0))
                ((Compiler*)this)->opts.compSupportsISAExactly |= isaBit;
            ((Compiler*)this)->opts.compSupportsISAReported |= isaBit;
        }
        return (opts.compSupportsISAExactly & isaBit) != 0;
#else
        return false;
#endif
    }

    // Ensure that code will not execute if an instruction set is usable. Call only
    // if the instruction set has previously been reported as unusable, but that
    // status has not yet been recorded to the AOT compiler.
    void compVerifyInstructionSetUnusable(CORINFO_InstructionSet isa)
    {
        // use compExactlyDependsOn to capture and record the use of the isa
        bool isaUsable = compExactlyDependsOn(isa);
        // Assert that the ISA is unusable. If it is usable, this function should never be called.
        assert(!isaUsable);
    }

    // Answer the question: Is a particular ISA allowed to be used implicitly by optimizations?
    // The result of this api call will match the target machine if the result is true.
    // If the result is false, then the target machine may still have support for the instruction.
    bool compOpportunisticallyDependsOn(CORINFO_InstructionSet isa) const
    {
        if ((opts.compSupportsISA & (1ULL << isa)) != 0)
        {
            return compExactlyDependsOn(isa);
        }
        else
        {
            return false;
        }
    }

    // Answer the question: Is a particular ISA supported for explicit hardware intrinsics?
    bool compHWIntrinsicDependsOn(CORINFO_InstructionSet isa) const
    {
        // Report intent to use the ISA to the EE
        compExactlyDependsOn(isa);
        return ((opts.compSupportsISA & (1ULL << isa)) != 0);
    }

    bool canUseVexEncoding() const
    {
#ifdef TARGET_XARCH
        return compOpportunisticallyDependsOn(InstructionSet_AVX);
#else
        return false;
#endif
    }

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Compiler                                        XX
XX                                                                           XX
XX   Generic info about the compilation and the method being compiled.      XX
XX   It is responsible for driving the other phases.                        XX
XX   It is also responsible for all the memory management.                  XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

public:
    Compiler* InlineeCompiler; // The Compiler instance for the inlinee

    InlineResult* compInlineResult; // The result of importing the inlinee method.

    bool compDoAggressiveInlining; // If true, mark every method as CORINFO_FLG_FORCEINLINE
    bool compJmpOpUsed;            // Does the method do a JMP
    bool compLongUsed;             // Does the method use TYP_LONG
    bool compFloatingPointUsed;    // Does the method use TYP_FLOAT or TYP_DOUBLE
    bool compTailCallUsed;         // Does the method do a tailcall
    bool compTailPrefixSeen;       // Does the method IL have tail. prefix
    bool compLocallocSeen;         // Does the method IL have localloc opcode
    bool compLocallocUsed;         // Does the method use localloc.
    bool compLocallocOptimized;    // Does the method have an optimized localloc
    bool compQmarkUsed;            // Does the method use GT_QMARK/GT_COLON
    bool compQmarkRationalized;    // Is it allowed to use a GT_QMARK/GT_COLON node.
    bool compHasBackwardJump;      // Does the method (or some inlinee) have a lexically backwards jump?
    bool compHasBackwardJumpInHandler; // Does the method have a lexically backwards jump in a handler?
    bool compSwitchedToOptimized;      // Codegen initially was Tier0 but jit switched to FullOpts
    bool compSwitchedToMinOpts;        // Codegen initially was Tier1/FullOpts but jit switched to MinOpts
    bool compSuppressedZeroInit;       // There are vars with lvSuppressedZeroInit set

// NOTE: These values are only reliable after
//       the importing is completely finished.

#ifdef DEBUG
    // State information - which phases have completed?
    // These are kept together for easy discoverability

    bool    bRangeAllowStress;
    bool    compCodeGenDone;
    int64_t compNumStatementLinksTraversed; // # of links traversed while doing debug checks
    bool    fgNormalizeEHDone;              // Has the flowgraph EH normalization phase been done?
    size_t  compSizeEstimate;               // The estimated size of the method as per `gtSetEvalOrder`.
    size_t  compCycleEstimate;              // The estimated cycle count of the method as per `gtSetEvalOrder`
#endif // DEBUG

    bool fgLocalVarLivenessDone; // Note that this one is used outside of debug.
    bool fgLocalVarLivenessChanged;
    bool compLSRADone;
    bool compRationalIRForm;

    bool compUsesThrowHelper; // There is a call to a THROW_HELPER for the compiled method.

    bool compGeneratingProlog;
    bool compGeneratingEpilog;
    bool compNeedsGSSecurityCookie; // There is an unsafe buffer (or localloc) on the stack.
                                    // Insert cookie on frame and code to check the cookie, like VC++ -GS.
    bool compGSReorderStackLayout;  // There is an unsafe buffer on the stack, reorder locals and make local
    // copies of susceptible parameters to avoid buffer overrun attacks through locals/params

    bool getNeedsGSSecurityCookie() const
    {
        return compNeedsGSSecurityCookie;
    }
    void setNeedsGSSecurityCookie()
    {
        compNeedsGSSecurityCookie = true;
    }

    FrameLayoutState lvaDoneFrameLayout; // The highest frame layout state that we've completed. During
                                         // frame layout calculations, this is the level we are currently
                                         // computing.

    //---------------------------- JITing options -----------------------------

    enum codeOptimize
    {
        BLENDED_CODE,
        SMALL_CODE,
        FAST_CODE,

        COUNT_OPT_CODE
    };

    struct Options
    {
        JitFlags* jitFlags; // all flags passed from the EE

        // The instruction sets that the compiler is allowed to emit.
        uint64_t compSupportsISA;
        // The instruction sets that were reported to the VM as being used by the current method. Subset of
        // compSupportsISA.
        uint64_t compSupportsISAReported;
        // The instruction sets that the compiler is allowed to take advantage of implicitly during optimizations.
        // Subset of compSupportsISA.
        // The instruction sets available in compSupportsISA and not available in compSupportsISAExactly can be only
        // used via explicit hardware intrinsics.
        uint64_t compSupportsISAExactly;

        void setSupportedISAs(CORINFO_InstructionSetFlags isas)
        {
            compSupportsISA = isas.GetFlagsRaw();
        }

        unsigned compFlags; // method attributes
        unsigned instrCount;
        unsigned lvRefCount;

        codeOptimize compCodeOpt; // what type of code optimizations

        bool compUseCMOV; // optimize maximally and/or favor speed over size?
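        // Illustrative sketch (hypothetical driver check, not part of this header): the
        // DEFAULT_MIN_OPTS_* thresholds below feed the MinOpts decision, conceptually
        //
        //     if ((codeSize > DEFAULT_MIN_OPTS_CODE_SIZE) || (instrCount > DEFAULT_MIN_OPTS_INSTR_COUNT))
        //     {
        //         opts.SetMinOpts(true); // method is too large; compile it without optimizations
        //     }
        //
        // where 'codeSize' stands in for the method's IL size, with the basic-block and
        // local-variable counts weighed the same way.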
#define DEFAULT_MIN_OPTS_CODE_SIZE 60000
#define DEFAULT_MIN_OPTS_INSTR_COUNT 20000
#define DEFAULT_MIN_OPTS_BB_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_NUM_COUNT 2000
#define DEFAULT_MIN_OPTS_LV_REF_COUNT 8000

// Maximum number of locals before turning off inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512

        bool compMinOpts;
        bool compMinOptsIsSet;
#ifdef DEBUG
        mutable bool compMinOptsIsUsed;

        bool MinOpts() const
        {
            assert(compMinOptsIsSet);
            compMinOptsIsUsed = true;
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#else  // !DEBUG
        bool MinOpts() const
        {
            return compMinOpts;
        }
        bool IsMinOptsSet() const
        {
            return compMinOptsIsSet;
        }
#endif // !DEBUG

        bool OptimizationDisabled() const
        {
            return MinOpts() || compDbgCode;
        }
        bool OptimizationEnabled() const
        {
            return !OptimizationDisabled();
        }

        void SetMinOpts(bool val)
        {
            assert(!compMinOptsIsUsed);
            assert(!compMinOptsIsSet || (compMinOpts == val));
            compMinOpts      = val;
            compMinOptsIsSet = true;
        }

        // true if the CLFLG_* for an optimization is set.
        bool OptEnabled(unsigned optFlag) const
        {
            return !!(compFlags & optFlag);
        }

#ifdef FEATURE_READYTORUN
        bool IsReadyToRun() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_READYTORUN);
        }
#else
        bool IsReadyToRun() const
        {
            return false;
        }
#endif

        // Check if the compilation is control-flow guard enabled.
        bool IsCFGEnabled() const
        {
#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
            // On these platforms we assume the register that the target is
            // passed in is preserved by the validator and take care to get the
            // target from the register for the call (even in debug mode).
            static_assert_no_msg((RBM_VALIDATE_INDIRECT_CALL_TRASH & (1 << REG_VALIDATE_INDIRECT_CALL_ADDR)) == 0);
            if (JitConfig.JitForceControlFlowGuard())
                return true;

            return jitFlags->IsSet(JitFlags::JIT_FLAG_ENABLE_CFG);
#else
            // The remaining platforms are not supported and would require some
            // work to support.
            //
            // ARM32:
            //   The ARM32 validator does not preserve any volatile registers
            //   which means we have to take special care to allocate and use a
            //   callee-saved register (reloading the target from memory is a
            //   security issue).
            //
            // x86:
            //   On x86 some VSD calls disassemble the call site and expect an
            //   indirect call which is fundamentally incompatible with CFG.
            //   This would require a different way to pass this information
            //   through.
            //
            return false;
#endif
        }

#ifdef FEATURE_ON_STACK_REPLACEMENT
        bool IsOSR() const
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_OSR);
        }
#else
        bool IsOSR() const
        {
            return false;
        }
#endif

        // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
        // PInvoke transitions inline. Normally used by R2R, but also used when generating a reverse pinvoke frame, as
        // the current logic for frame setup initializes and pushes
        // the InlinedCallFrame before performing the Reverse PInvoke transition, which is invalid (as frames cannot
        // safely be pushed/popped while the thread is in a preemptive state).
        bool ShouldUsePInvokeHelpers()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_USE_PINVOKE_HELPERS) ||
                   jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        // true if we should insert the REVERSE_PINVOKE_{ENTER,EXIT} helpers in the method
        // prolog/epilog
        bool IsReversePInvoke()
        {
            return jitFlags->IsSet(JitFlags::JIT_FLAG_REVERSE_PINVOKE);
        }

        bool compScopeInfo; // Generate the LocalVar info ?
        bool compDbgCode;   // Generate debugger-friendly code?
        bool compDbgInfo;   // Gather debugging info?
        bool compDbgEnC;

#ifdef PROFILING_SUPPORTED
        bool compNoPInvokeInlineCB;
#else
        static const bool compNoPInvokeInlineCB;
#endif

#ifdef DEBUG
        bool compGcChecks; // Check arguments and return values to ensure they are sane
#endif

#if defined(DEBUG) && defined(TARGET_XARCH)
        bool compStackCheckOnRet; // Check stack pointer on return to ensure it is correct.
#endif // defined(DEBUG) && defined(TARGET_XARCH)

#if defined(DEBUG) && defined(TARGET_X86)
        bool compStackCheckOnCall; // Check stack pointer after call to ensure it is correct. Only for x86.
#endif // defined(DEBUG) && defined(TARGET_X86)

        bool compReloc; // Generate relocs for pointers in code, true for all ngen/prejit codegen

#ifdef DEBUG
#if defined(TARGET_XARCH)
        bool compEnablePCRelAddr; // Whether absolute addr can be encoded as PC-rel offset by RyuJIT where possible
#endif
#endif // DEBUG

#ifdef UNIX_AMD64_ABI
        // This flag indicates whether there is a need to align the frame.
        // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for
        // FastTailCall. These slots make the frame size non-zero, so alignment logic will be called.
        // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size
        // of 0. The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering
        // that there are calls and making sure the frame alignment logic is executed.
        bool compNeedToAlignFrame;
#endif // UNIX_AMD64_ABI

        bool compProcedureSplitting; // Separate cold code from hot code

        bool genFPorder; // Preserve FP order (operations are non-commutative)
        bool genFPopt;   // Can we do frame-pointer-omission optimization?
        bool altJit;     // True if we are an altjit and are compiling this method

#ifdef OPT_CONFIG
        bool optRepeat; // Repeat optimizer phases k times
#endif

#ifdef DEBUG
        bool compProcedureSplittingEH; // Separate cold code from hot code for functions with EH
        bool dspCode;                  // Display native code generated
        bool dspEHTable;               // Display the EH table reported to the VM
        bool dspDebugInfo;             // Display the Debug info reported to the VM
        bool dspInstrs;                // Display the IL instructions intermixed with the native code output
        bool dspLines;                 // Display source-code lines intermixed with native code output
        bool dmpHex;                   // Display raw bytes in hex of native code output
        bool varNames;                 // Display variable names in native code output
        bool disAsm;                   // Display native code as it is generated
        bool disAsmSpilled;            // Display native code when any register spilling occurs
        bool disasmWithGC;             // Display GC info interleaved with disassembly.
        bool disDiffable;              // Makes the Disassembly code 'diff-able'
        bool disAddr;                  // Display process address next to each instruction in disassembly code
        bool disAlignment;             // Display alignment boundaries in disassembly code
        bool disAsm2;                  // Display native code after it is generated using external disassembler
        bool dspOrder;                 // Display names of each of the methods that we ngen/jit
        bool dspUnwind;                // Display the unwind info output
        bool dspDiffable;              // Makes the Jit Dump 'diff-able' (currently uses same COMPlus_* flag as
                                       // disDiffable)
        bool compLongAddress;          // Force using large pseudo instructions for long address
                                       // (IF_LARGEJMP/IF_LARGEADR/IF_LARGLDC)
        bool dspGCtbls;                // Display the GC tables
#endif

        bool compExpandCallsEarly; // True if we should expand virtual call targets early for this method

        // Default numbers used to perform loop alignment. All the numbers are chosen
        // based on experimenting with various benchmarks.
        // Default minimum loop block weight required to enable loop alignment.
#define DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT 4

        // By default a loop will be aligned at 32B address boundary to get better
        // performance as per architecture manuals.
#define DEFAULT_ALIGN_LOOP_BOUNDARY 0x20

        // For non-adaptive loop alignment, by default, only align a loop whose size is
        // at most 3 times the alignment block size. If the loop is bigger than that, it is most
        // likely complicated enough that loop alignment will not impact performance.
#define DEFAULT_MAX_LOOPSIZE_FOR_ALIGN DEFAULT_ALIGN_LOOP_BOUNDARY * 3

#ifdef DEBUG
        // Loop alignment variables

        // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary.
        bool compJitAlignLoopForJcc;
#endif
        // For non-adaptive alignment, maximum loop size (in bytes) for which alignment will be done.
        unsigned short compJitAlignLoopMaxCodeSize;

        // Minimum weight needed for the first block of a loop to make it a candidate for alignment.
        unsigned short compJitAlignLoopMinBlockWeight;

        // For non-adaptive alignment, address boundary (power of 2) at which loop alignment should
        // be done. By default, 32B.
        unsigned short compJitAlignLoopBoundary;

        // Padding limit to align a loop.
        unsigned short compJitAlignPaddingLimit;

        // If set, perform adaptive loop alignment that limits the amount of padding based on loop size.
        bool compJitAlignLoopAdaptive;

        // If set, tries to hide alignment instructions behind unconditional jumps.
        bool compJitHideAlignBehindJmp;

#ifdef LATE_DISASM
        bool doLateDisasm; // Run the late disassembler
#endif                     // LATE_DISASM

#if DUMP_GC_TABLES && !defined(DEBUG)
#pragma message("NOTE: this non-debug build has GC ptr table dumping always enabled!")
        static const bool dspGCtbls = true;
#endif

#ifdef PROFILING_SUPPORTED
        // Whether to emit Enter/Leave/TailCall hooks using a dummy stub (DummyProfilerELTStub()).
        // This option helps make the JIT behave as if it is running under a profiler.
        bool compJitELTHookEnabled;
#endif // PROFILING_SUPPORTED

#if FEATURE_TAILCALL_OPT
        // Whether opportunistic or implicit tail call optimization is enabled.
        bool compTailCallOpt;
        // Whether optimization of transforming a recursive tail call into a loop is enabled.
        bool compTailCallLoopOpt;
#endif

#if FEATURE_FASTTAILCALL
        // Whether fast tail calls are allowed.
        bool compFastTailCalls;
#endif // FEATURE_FASTTAILCALL

#if defined(TARGET_ARM64)
        // Decision about whether to save FP/LR registers with callee-saved registers (see
        // COMPlus_JitSaveFpLrWithCalleSavedRegisters).
        int compJitSaveFpLrWithCalleeSavedRegisters;
#endif // defined(TARGET_ARM64)

#ifdef CONFIGURABLE_ARM_ABI
        bool compUseSoftFP = false;
#else
#ifdef ARM_SOFTFP
        static const bool compUseSoftFP = true;
#else  // !ARM_SOFTFP
        static const bool compUseSoftFP = false;
#endif // ARM_SOFTFP
#endif // CONFIGURABLE_ARM_ABI
    } opts;

    static bool s_pAltJitExcludeAssembliesListInitialized;
    static AssemblyNamesList2* s_pAltJitExcludeAssembliesList;

#ifdef DEBUG
    static bool s_pJitDisasmIncludeAssembliesListInitialized;
    static AssemblyNamesList2* s_pJitDisasmIncludeAssembliesList;

    static bool s_pJitFunctionFileInitialized;
    static MethodSet* s_pJitMethodSet;
#endif // DEBUG

#ifdef DEBUG
// silence warning of cast to greater size. It is easier to silence than construct code the compiler is happy with,
// and it is safe in this case
#pragma warning(push)
#pragma warning(disable : 4312)

    template <typename T>
    T dspPtr(T p)
    {
        return (p == ZERO) ? ZERO : (opts.dspDiffable ?
T(0xD1FFAB1E) : p); } template <typename T> T dspOffset(T o) { return (o == ZERO) ? ZERO : (opts.dspDiffable ? T(0xD1FFAB1E) : o); } #pragma warning(pop) static int dspTreeID(GenTree* tree) { return tree->gtTreeID; } static void printStmtID(Statement* stmt) { assert(stmt != nullptr); printf(FMT_STMT, stmt->GetID()); } static void printTreeID(GenTree* tree) { if (tree == nullptr) { printf("[------]"); } else { printf("[%06d]", dspTreeID(tree)); } } const char* pgoSourceToString(ICorJitInfo::PgoSource p); const char* devirtualizationDetailToString(CORINFO_DEVIRTUALIZATION_DETAIL detail); #endif // DEBUG // clang-format off #define STRESS_MODES \ \ STRESS_MODE(NONE) \ \ /* "Variations" stress areas which we try to mix up with each other. */ \ /* These should not be exhaustively used as they might */ \ /* hide/trivialize other areas */ \ \ STRESS_MODE(REGS) \ STRESS_MODE(DBL_ALN) \ STRESS_MODE(LCL_FLDS) \ STRESS_MODE(UNROLL_LOOPS) \ STRESS_MODE(MAKE_CSE) \ STRESS_MODE(LEGACY_INLINE) \ STRESS_MODE(CLONE_EXPR) \ STRESS_MODE(USE_CMOV) \ STRESS_MODE(FOLD) \ STRESS_MODE(MERGED_RETURNS) \ STRESS_MODE(BB_PROFILE) \ STRESS_MODE(OPT_BOOLS_GC) \ STRESS_MODE(REMORPH_TREES) \ STRESS_MODE(64RSLT_MUL) \ STRESS_MODE(DO_WHILE_LOOPS) \ STRESS_MODE(MIN_OPTS) \ STRESS_MODE(REVERSE_FLAG) /* Will set GTF_REVERSE_OPS whenever we can */ \ STRESS_MODE(REVERSE_COMMA) /* Will reverse commas created with gtNewCommaNode */ \ STRESS_MODE(TAILCALL) /* Will make the call as a tailcall whenever legal */ \ STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \ STRESS_MODE(UNSAFE_BUFFER_CHECKS) \ STRESS_MODE(NULL_OBJECT_CHECK) \ STRESS_MODE(PINVOKE_RESTORE_ESP) \ STRESS_MODE(RANDOM_INLINE) \ STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \ STRESS_MODE(GENERIC_VARN) \ STRESS_MODE(PROFILER_CALLBACKS) /* Will generate profiler hooks for ELT callbacks */ \ STRESS_MODE(BYREF_PROMOTION) /* Change undoPromotion decisions for byrefs */ \ STRESS_MODE(PROMOTE_FEWER_STRUCTS)/* Don't promote some structs that can be promoted */ \ STRESS_MODE(VN_BUDGET)/* Randomize the VN budget */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ STRESS_MODE(COUNT_VARN) \ \ /* "Check" stress areas that can be exhaustively used if we */ \ /* dont care about performance at all */ \ \ STRESS_MODE(FORCE_INLINE) /* Treat every method as AggressiveInlining */ \ STRESS_MODE(CHK_FLOW_UPDATE) \ STRESS_MODE(EMITTER) \ STRESS_MODE(CHK_REIMPORT) \ STRESS_MODE(FLATFP) \ STRESS_MODE(GENERIC_CHECK) \ STRESS_MODE(COUNT) enum compStressArea { #define STRESS_MODE(mode) STRESS_##mode, STRESS_MODES #undef STRESS_MODE }; // clang-format on #ifdef DEBUG static const LPCWSTR s_compStressModeNames[STRESS_COUNT + 1]; BYTE compActiveStressModes[STRESS_COUNT]; #endif // DEBUG #define MAX_STRESS_WEIGHT 100 bool compStressCompile(compStressArea stressArea, unsigned weightPercentage); bool compStressCompileHelper(compStressArea stressArea, unsigned weightPercentage); #ifdef DEBUG bool compInlineStress() { return compStressCompile(STRESS_LEGACY_INLINE, 50); } bool compRandomInlineStress() { return compStressCompile(STRESS_RANDOM_INLINE, 50); } bool compPromoteFewerStructs(unsigned lclNum); #endif // DEBUG bool compTailCallStress() { #ifdef DEBUG // Do not stress tailcalls in IL stubs as the runtime creates several IL // stubs to implement the tailcall mechanism, which would then // recursively create more IL stubs. 
return !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && (JitConfig.TailcallStress() != 0 || compStressCompile(STRESS_TAILCALL, 5)); #else return false; #endif } const char* compGetTieringName(bool wantShortName = false) const; const char* compGetStressMessage() const; codeOptimize compCodeOpt() const { #if 0 // Switching between size & speed has measurable throughput impact // (3.5% on NGen CoreLib when measured). It used to be enabled for // DEBUG, but should generate identical code between CHK & RET builds, // so that's not acceptable. // TODO-Throughput: Figure out what to do about size vs. speed & throughput. // Investigate the cause of the throughput regression. return opts.compCodeOpt; #else return BLENDED_CODE; #endif } //--------------------- Info about the procedure -------------------------- struct Info { COMP_HANDLE compCompHnd; CORINFO_MODULE_HANDLE compScopeHnd; CORINFO_CLASS_HANDLE compClassHnd; CORINFO_METHOD_HANDLE compMethodHnd; CORINFO_METHOD_INFO* compMethodInfo; bool hasCircularClassConstraints; bool hasCircularMethodConstraints; #if defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS const char* compMethodName; const char* compClassName; const char* compFullName; double compPerfScore; int compMethodSuperPMIIndex; // useful when debugging under SuperPMI #endif // defined(DEBUG) || defined(LATE_DISASM) || DUMP_FLOWGRAPHS #if defined(DEBUG) || defined(INLINE_DATA) // Method hash is logically const, but computed // on first demand. mutable unsigned compMethodHashPrivate; unsigned compMethodHash() const; #endif // defined(DEBUG) || defined(INLINE_DATA) #ifdef PSEUDORANDOM_NOP_INSERTION // things for pseudorandom nop insertion unsigned compChecksum; CLRRandom compRNG; #endif // The following holds the FLG_xxxx flags for the method we're compiling. unsigned compFlags; // The following holds the class attributes for the method we're compiling. unsigned compClassAttr; const BYTE* compCode; IL_OFFSET compILCodeSize; // The IL code size IL_OFFSET compILImportSize; // Estimated amount of IL actually imported IL_OFFSET compILEntry; // The IL entry point (normally 0) PatchpointInfo* compPatchpointInfo; // Patchpoint data for OSR (normally nullptr) UNATIVE_OFFSET compNativeCodeSize; // The native code size, after instructions are issued. This // is less than (compTotalHotCodeSize + compTotalColdCodeSize) only if: // (1) the code is not hot/cold split, and we issued less code than we expected, or // (2) the code is hot/cold split, and we issued less code than we expected // in the cold section (the hot section will always be padded out to compTotalHotCodeSize). bool compIsStatic : 1; // Is the method static (no 'this' pointer)? bool compIsVarArgs : 1; // Does the method have varargs parameters? bool compInitMem : 1; // Is the CORINFO_OPT_INIT_LOCALS bit set in the method info options? bool compProfilerCallback : 1; // JIT inserted a profiler Enter callback bool compPublishStubParam : 1; // EAX captured in prolog will be available through an intrinsic bool compHasNextCallRetAddr : 1; // The NextCallReturnAddress intrinsic is used. var_types compRetType; // Return type of the method as declared in IL var_types compRetNativeType; // Normalized return type as per target arch ABI unsigned compILargsCount; // Number of arguments (incl. implicit but not hidden) unsigned compArgsCount; // Number of arguments (incl. 
implicit and hidden) #if FEATURE_FASTTAILCALL unsigned compArgStackSize; // Incoming argument stack size in bytes #endif // FEATURE_FASTTAILCALL unsigned compRetBuffArg; // position of hidden return param var (0, 1) (BAD_VAR_NUM means not present); int compTypeCtxtArg; // position of hidden param for type context for generic code (CORINFO_CALLCONV_PARAMTYPE) unsigned compThisArg; // position of implicit this pointer param (not to be confused with lvaArg0Var) unsigned compILlocalsCount; // Number of vars : args + locals (incl. implicit but not hidden) unsigned compLocalsCount; // Number of vars : args + locals (incl. implicit and hidden) unsigned compMaxStack; UNATIVE_OFFSET compTotalHotCodeSize; // Total number of bytes of Hot Code in the method UNATIVE_OFFSET compTotalColdCodeSize; // Total number of bytes of Cold Code in the method unsigned compUnmanagedCallCountWithGCTransition; // count of unmanaged calls with GC transition. CorInfoCallConvExtension compCallConv; // The entry-point calling convention for this method. unsigned compLvFrameListRoot; // lclNum for the Frame root unsigned compXcptnsCount; // Number of exception-handling clauses read in the method's IL. // You should generally use compHndBBtabCount instead: it is the // current number of EH clauses (after additions like synchronized // methods and funclets, and removals like unreachable code deletion). Target::ArgOrder compArgOrder; bool compMatchedVM; // true if the VM is "matched": either the JIT is a cross-compiler // and the VM expects that, or the JIT is a "self-host" compiler // (e.g., x86 hosted targeting x86) and the VM expects that. /* The following holds IL scope information about local variables. */ unsigned compVarScopesCount; VarScopeDsc* compVarScopes; /* The following holds information about instr offsets for * which we need to report IP-mappings */ IL_OFFSET* compStmtOffsets; // sorted unsigned compStmtOffsetsCount; ICorDebugInfo::BoundaryTypes compStmtOffsetsImplicit; #define CPU_X86 0x0100 // The generic X86 CPU #define CPU_X86_PENTIUM_4 0x0110 #define CPU_X64 0x0200 // The generic x64 CPU #define CPU_AMD_X64 0x0210 // AMD x64 CPU #define CPU_INTEL_X64 0x0240 // Intel x64 CPU #define CPU_ARM 0x0300 // The generic ARM CPU #define CPU_ARM64 0x0400 // The generic ARM64 CPU unsigned genCPU; // What CPU are we running on // Number of class profile probes in this method unsigned compClassProbeCount; } info; // Returns true if the method being compiled returns a non-void and non-struct value. // Note that lvaInitTypeRef() normalizes compRetNativeType for struct returns in a // single register as per target arch ABI (e.g on Amd64 Windows structs of size 1, 2, // 4 or 8 gets normalized to TYP_BYTE/TYP_SHORT/TYP_INT/TYP_LONG; On Arm HFA structs). // Methods returning such structs are considered to return non-struct return value and // this method returns true in that case. bool compMethodReturnsNativeScalarType() { return (info.compRetType != TYP_VOID) && !varTypeIsStruct(info.compRetNativeType); } // Returns true if the method being compiled returns RetBuf addr as its return value bool compMethodReturnsRetBufAddr() { // There are cases where implicit RetBuf argument should be explicitly returned in a register. // In such cases the return type is changed to TYP_BYREF and appropriate IR is generated. // These cases are: CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_AMD64 // 1. on x64 Windows and Unix the address of RetBuf needs to be returned by // methods with hidden RetBufArg in RAX. 
In such case GT_RETURN is of TYP_BYREF, // returning the address of RetBuf. return (info.compRetBuffArg != BAD_VAR_NUM); #else // TARGET_AMD64 #ifdef PROFILING_SUPPORTED // 2. Profiler Leave callback expects the address of retbuf as return value for // methods with hidden RetBuf argument. impReturnInstruction() when profiler // callbacks are needed creates GT_RETURN(TYP_BYREF, op1 = Addr of RetBuf) for // methods with hidden RetBufArg. if (compIsProfilerHookNeeded()) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif // 3. Windows ARM64 native instance calling convention requires the address of RetBuff // to be returned in x0. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { auto callConv = info.compCallConv; if (callConvIsInstanceMethodCallConv(callConv)) { return (info.compRetBuffArg != BAD_VAR_NUM); } } #endif // TARGET_ARM64 // 4. x86 unmanaged calling conventions require the address of RetBuff to be returned in eax. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) if (info.compCallConv != CorInfoCallConvExtension::Managed) { return (info.compRetBuffArg != BAD_VAR_NUM); } #endif return false; #endif // TARGET_AMD64 } // Returns true if the method returns a value in more than one return register // TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs? // TODO-ARM64: Does this apply for ARM64 too? bool compMethodReturnsMultiRegRetType() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } bool compEnregLocals() { return ((opts.compFlags & CLFLG_REGVAR) != 0); } bool compEnregStructLocals() { return (JitConfig.JitEnregStructLocals() != 0); } bool compObjectStackAllocation() { return (JitConfig.JitObjectStackAllocation() != 0); } // Returns true if the method returns a value in more than one return register, // it should replace/be merged with compMethodReturnsMultiRegRetType when #36868 is fixed. // The difference from original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD* handling, // this method correctly returns false for it (it is passed as HVA), when the original returns true. bool compMethodReturnsMultiRegRegTypeAlternate() { #if FEATURE_MULTIREG_RET #if defined(TARGET_X86) // On x86, 64-bit longs and structs are returned in multiple registers return varTypeIsLong(info.compRetNativeType) || (varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM)); #else // targets: X64-UNIX, ARM64 or ARM32 #if defined(TARGET_ARM64) // TYP_SIMD* are returned in one register. if (varTypeIsSIMD(info.compRetNativeType)) { return false; } #endif // On all other targets that support multireg return values: // Methods returning a struct in multiple registers have a return value of TYP_STRUCT. 
// Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM); #endif // TARGET_XXX #else // not FEATURE_MULTIREG_RET // For this architecture there are no multireg returns return false; #endif // FEATURE_MULTIREG_RET } // Returns true if the method being compiled returns a value bool compMethodHasRetVal() { return compMethodReturnsNativeScalarType() || compMethodReturnsRetBufAddr() || compMethodReturnsMultiRegRetType(); } // Returns true if the method requires a PInvoke prolog and epilog bool compMethodRequiresPInvokeFrame() { return (info.compUnmanagedCallCountWithGCTransition > 0); } // Returns true if address-exposed user variables should be poisoned with a recognizable value bool compShouldPoisonFrame() { #ifdef FEATURE_ON_STACK_REPLACEMENT if (opts.IsOSR()) return false; #endif return !info.compInitMem && opts.compDbgCode; } // Returns true if the jit supports having patchpoints in this method. // Optionally, get the reason why not. bool compCanHavePatchpoints(const char** reason = nullptr); #if defined(DEBUG) void compDispLocalVars(); #endif // DEBUG private: class ClassLayoutTable* m_classLayoutTable; class ClassLayoutTable* typCreateClassLayoutTable(); class ClassLayoutTable* typGetClassLayoutTable(); public: // Get the layout having the specified layout number. ClassLayout* typGetLayoutByNum(unsigned layoutNum); // Get the layout number of the specified layout. unsigned typGetLayoutNum(ClassLayout* layout); // Get the layout having the specified size but no class handle. ClassLayout* typGetBlkLayout(unsigned blockSize); // Get the number of a layout having the specified size but no class handle. unsigned typGetBlkLayoutNum(unsigned blockSize); // Get the layout for the specified class handle. ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle); // Get the number of a layout for the specified class handle. unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle); //-------------------------- Global Compiler Data ------------------------------------ #ifdef DEBUG private: static LONG s_compMethodsCount; // to produce unique label names #endif public: #ifdef DEBUG LONG compMethodID; unsigned compGenTreeID; unsigned compStatementID; unsigned compBasicBlockID; #endif BasicBlock* compCurBB; // the current basic block in process Statement* compCurStmt; // the current statement in process GenTree* compCurTree; // the current tree in process // The following is used to create the 'method JIT info' block. 
size_t compInfoBlkSize; BYTE* compInfoBlkAddr; EHblkDsc* compHndBBtab; // array of EH data unsigned compHndBBtabCount; // element count of used elements in EH data array unsigned compHndBBtabAllocCount; // element count of allocated elements in EH data array #if defined(TARGET_X86) //------------------------------------------------------------------------- // Tracking of region covered by the monitor in synchronized methods void* syncStartEmitCookie; // the emitter cookie for first instruction after the call to MON_ENTER void* syncEndEmitCookie; // the emitter cookie for first instruction after the call to MON_EXIT #endif // !TARGET_X86 Phases mostRecentlyActivePhase; // the most recently active phase PhaseChecks activePhaseChecks; // the currently active phase checks //------------------------------------------------------------------------- // The following keeps track of how many bytes of local frame space we've // grabbed so far in the current function, and how many argument bytes we // need to pop when we return. // unsigned compLclFrameSize; // secObject+lclBlk+locals+temps // Count of callee-saved regs we pushed in the prolog. // Does not include EBP for isFramePointerUsed() and double-aligned frames. // In case of Amd64 this doesn't include float regs saved on stack. unsigned compCalleeRegsPushed; #if defined(TARGET_XARCH) // Mask of callee saved float regs on stack. regMaskTP compCalleeFPRegsSavedMask; #endif #ifdef TARGET_AMD64 // Quirk for VS debug-launch scenario to work: // Bytes of padding between save-reg area and locals. #define VSQUIRK_STACK_PAD (2 * REGSIZE_BYTES) unsigned compVSQuirkStackPaddingNeeded; #endif unsigned compArgSize; // total size of arguments in bytes (including register args (lvIsRegArg)) #ifdef TARGET_ARM bool compHasSplitParam; #endif unsigned compMapILargNum(unsigned ILargNum); // map accounting for hidden args unsigned compMapILvarNum(unsigned ILvarNum); // map accounting for hidden args unsigned compMap2ILvarNum(unsigned varNum) const; // map accounting for hidden args #if defined(TARGET_ARM64) struct FrameInfo { // Frame type (1-5) int frameType; // Distance from established (method body) SP to base of callee save area int calleeSaveSpOffset; // Amount to subtract from SP before saving (prolog) OR // to add to SP after restoring (epilog) callee saves int calleeSaveSpDelta; // Distance from established SP to where caller's FP was saved int offsetSpToSavedFp; } compFrameInfo; #endif //------------------------------------------------------------------------- static void compStartup(); // One-time initialization static void compShutdown(); // One-time finalization void compInit(ArenaAllocator* pAlloc, CORINFO_METHOD_HANDLE methodHnd, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, InlineInfo* inlineInfo); void compDone(); static void compDisplayStaticSizes(FILE* fout); //------------ Some utility functions -------------- void* compGetHelperFtn(CorInfoHelpFunc ftnNum, /* IN */ void** ppIndirection); /* OUT */ // Several JIT/EE interface functions return a CorInfoType, and also return a // class handle as an out parameter if the type is a value class. Returns the // size of the type these describe. unsigned compGetTypeSize(CorInfoType cit, CORINFO_CLASS_HANDLE clsHnd); // Returns true if the method being compiled has a return buffer. bool compHasRetBuffArg(); #ifdef DEBUG // Components used by the compiler may write unit test suites, and // have them run within this method. They will be run only once per process, and only // in debug. 
(Perhaps should be under the control of a COMPlus_ flag.) // These should fail by asserting. void compDoComponentUnitTestsOnce(); #endif // DEBUG int compCompile(CORINFO_MODULE_HANDLE classPtr, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); void compCompileFinish(); int compCompileHelper(CORINFO_MODULE_HANDLE classPtr, COMP_HANDLE compHnd, CORINFO_METHOD_INFO* methodInfo, void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlag); ArenaAllocator* compGetArenaAllocator(); void generatePatchpointInfo(); #if MEASURE_MEM_ALLOC static bool s_dspMemStats; // Display per-phase memory statistics for every function #endif // MEASURE_MEM_ALLOC #if LOOP_HOIST_STATS unsigned m_loopsConsidered; bool m_curLoopHasHoistedExpression; unsigned m_loopsWithHoistedExpressions; unsigned m_totalHoistedExpressions; void AddLoopHoistStats(); void PrintPerMethodLoopHoistStats(); static CritSecObject s_loopHoistStatsLock; // This lock protects the data structures below. static unsigned s_loopsConsidered; static unsigned s_loopsWithHoistedExpressions; static unsigned s_totalHoistedExpressions; static void PrintAggregateLoopHoistStats(FILE* f); #endif // LOOP_HOIST_STATS #if TRACK_ENREG_STATS class EnregisterStats { private: unsigned m_totalNumberOfVars; unsigned m_totalNumberOfStructVars; unsigned m_totalNumberOfEnregVars; unsigned m_totalNumberOfStructEnregVars; unsigned m_addrExposed; unsigned m_VMNeedsStackAddr; unsigned m_localField; unsigned m_blockOp; unsigned m_dontEnregStructs; unsigned m_notRegSizeStruct; unsigned m_structArg; unsigned m_lclAddrNode; unsigned m_castTakesAddr; unsigned m_storeBlkSrc; unsigned m_oneAsgRetyping; unsigned m_swizzleArg; unsigned m_blockOpRet; unsigned m_returnSpCheck; unsigned m_simdUserForcesDep; unsigned m_liveInOutHndlr; unsigned m_depField; unsigned m_noRegVars; unsigned m_minOptsGC; #ifdef JIT32_GCENCODER unsigned m_PinningRef; #endif // JIT32_GCENCODER #if !defined(TARGET_64BIT) unsigned m_longParamField; #endif // !TARGET_64BIT unsigned m_parentExposed; unsigned m_tooConservative; unsigned m_escapeAddress; unsigned m_osrExposed; unsigned m_stressLclFld; unsigned m_copyFldByFld; unsigned m_dispatchRetBuf; unsigned m_wideIndir; public: void RecordLocal(const LclVarDsc* varDsc); void Dump(FILE* fout) const; }; static EnregisterStats s_enregisterStats; #endif // TRACK_ENREG_STATS bool compIsForImportOnly(); bool compIsForInlining() const; bool compDonotInline(); #ifdef DEBUG // Get the default fill char value. We randomize this value when JitStress is enabled. 
static unsigned char compGetJitDefaultFill(Compiler* comp); const char* compLocalVarName(unsigned varNum, unsigned offs); VarName compVarName(regNumber reg, bool isFloatReg = false); const char* compRegVarName(regNumber reg, bool displayVar = false, bool isFloatReg = false); const char* compRegNameForSize(regNumber reg, size_t size); const char* compFPregVarName(unsigned fpReg, bool displayVar = false); void compDspSrcLinesByNativeIP(UNATIVE_OFFSET curIP); void compDspSrcLinesByLineNum(unsigned line, bool seek = false); #endif // DEBUG //------------------------------------------------------------------------- struct VarScopeListNode { VarScopeDsc* data; VarScopeListNode* next; static VarScopeListNode* Create(VarScopeDsc* value, CompAllocator alloc) { VarScopeListNode* node = new (alloc) VarScopeListNode; node->data = value; node->next = nullptr; return node; } }; struct VarScopeMapInfo { VarScopeListNode* head; VarScopeListNode* tail; static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator alloc) { VarScopeMapInfo* info = new (alloc) VarScopeMapInfo; info->head = node; info->tail = node; return info; } }; // Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup. static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32; typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap; // Map to keep variables' scope indexed by varNum containing its scope dscs at the index. VarNumToScopeDscMap* compVarScopeMap; VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned lifeBeg, unsigned lifeEnd); VarScopeDsc* compFindLocalVar(unsigned varNum, unsigned offs); VarScopeDsc* compFindLocalVarLinear(unsigned varNum, unsigned offs); void compInitVarScopeMap(); VarScopeDsc** compEnterScopeList; // List has the offsets where variables // enter scope, sorted by instr offset unsigned compNextEnterScope; VarScopeDsc** compExitScopeList; // List has the offsets where variables // go out of scope, sorted by instr offset unsigned compNextExitScope; void compInitScopeLists(); void compResetScopeLists(); VarScopeDsc* compGetNextEnterScope(unsigned offs, bool scan = false); VarScopeDsc* compGetNextExitScope(unsigned offs, bool scan = false); void compProcessScopesUntil(unsigned offset, VARSET_TP* inScope, void (Compiler::*enterScopeFn)(VARSET_TP* inScope, VarScopeDsc*), void (Compiler::*exitScopeFn)(VARSET_TP* inScope, VarScopeDsc*)); #ifdef DEBUG void compDispScopeLists(); #endif // DEBUG bool compIsProfilerHookNeeded(); //------------------------------------------------------------------------- /* Statistical Data Gathering */ void compJitStats(); // call this function and enable // various ifdef's below for statistical data #if CALL_ARG_STATS void compCallArgStats(); static void compDispCallArgStats(FILE* fout); #endif //------------------------------------------------------------------------- protected: #ifdef DEBUG bool skipMethod(); #endif ArenaAllocator* compArenaAllocator; public: void compFunctionTraceStart(); void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI); protected: size_t compMaxUncheckedOffsetForNullObject; void compInitOptions(JitFlags* compileFlags); void compSetProcessor(); void compInitDebuggingInfo(); void compSetOptimizationLevel(); #ifdef TARGET_ARMARCH bool compRsvdRegCheck(FrameLayoutState curState); #endif void compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFlags* compileFlags); // Clear annotations produced during optimizations; 
to be used between iterations when repeating opts. void ResetOptAnnotations(); // Regenerate loop descriptors; to be used between iterations when repeating opts. void RecomputeLoopInfo(); #ifdef PROFILING_SUPPORTED // Data required for generating profiler Enter/Leave/TailCall hooks bool compProfilerHookNeeded; // Whether profiler Enter/Leave/TailCall hook needs to be generated for the method void* compProfilerMethHnd; // Profiler handle of the method being compiled. Passed as param to ELT callbacks bool compProfilerMethHndIndirected; // Whether compProfilerHandle is pointer to the handle or is an actual handle #endif public: // Assumes called as part of process shutdown; does any compiler-specific work associated with that. static void ProcessShutdownWork(ICorStaticInfo* statInfo); CompAllocator getAllocator(CompMemKind cmk = CMK_Generic) { return CompAllocator(compArenaAllocator, cmk); } CompAllocator getAllocatorGC() { return getAllocator(CMK_GC); } CompAllocator getAllocatorLoopHoist() { return getAllocator(CMK_LoopHoist); } #ifdef DEBUG CompAllocator getAllocatorDebugOnly() { return getAllocator(CMK_DebugOnly); } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX typeInfo XX XX XX XX Checks for type compatibility and merges types XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // Returns true if child is equal to or a subtype of parent for merge purposes // This support is necessary to support attributes that are not described in // for example, signatures. For example, the permanent home byref (byref that // points to the gc heap), isn't a property of method signatures, therefore, // it is safe to have mismatches here (that tiCompatibleWith will not flag), // but when deciding if we need to reimport a block, we need to take these // into account bool tiMergeCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Returns true if child is equal to or a subtype of parent. // normalisedForStack indicates that both types are normalised for the stack bool tiCompatibleWith(const typeInfo& pChild, const typeInfo& pParent, bool normalisedForStack) const; // Merges pDest and pSrc. Returns false if merge is undefined. // *pDest is modified to represent the merged type. Sets "*changed" to true // if this changes "*pDest". bool tiMergeToCommonParent(typeInfo* pDest, const typeInfo* pSrc, bool* changed) const; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX IL verification stuff XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: // The following is used to track liveness of local variables, initialization // of valueclass constructors, and type safe use of IL instructions. // dynamic state info needed for verification EntryState verCurrentState; // this ptr of object type .ctors are considered initialized only after // the base class ctor is called, or an alternate ctor is called. // An uninited this ptr can be used to access fields, but cannot // be used to call a member function. 
bool verTrackObjCtorInitState; void verInitBBEntryState(BasicBlock* block, EntryState* currentState); // Requires that "tis" is not TIS_Bottom -- it's a definite init/uninit state. void verSetThisInit(BasicBlock* block, ThisInitState tis); void verInitCurrentState(); void verResetCurrentState(BasicBlock* block, EntryState* currentState); // Merges the current verification state into the entry state of "block"; returns false if that merge fails, // true if it succeeds. Further sets "*changed" to true if this changes the entry state of "block". bool verMergeEntryStates(BasicBlock* block, bool* changed); void verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)); void verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)); typeInfo verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef = false); // converts from jit type representation to typeInfo typeInfo verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd); // converts from jit type representation to typeInfo bool verIsSDArray(const typeInfo& ti); typeInfo verGetArrayElemType(const typeInfo& ti); typeInfo verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args); bool verIsByRefLike(const typeInfo& ti); bool verIsSafeToReturnByRef(const typeInfo& ti); // generic type variables range over types that satisfy IsBoxable bool verIsBoxable(const typeInfo& ti); void DECLSPEC_NORETURN verRaiseVerifyException(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); void verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* reason) DEBUGARG(const char* file) DEBUGARG(unsigned line)); bool verCheckTailCallConstraint(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call // on a type parameter? bool speculative // If true, won't throw if verification fails. Instead it will // return false to the caller. // If false, it will throw. ); bool verIsBoxedValueType(const typeInfo& ti); void verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, // is this a "readonly." call? const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)); bool verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef); typeInfo verVerifySTIND(const typeInfo& ptr, const typeInfo& value, const typeInfo& instrType); typeInfo verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType); void verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis = false); void verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode); void verVerifyThisPtrInitialised(); bool verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target); #ifdef DEBUG // One line log function. Default level is 0. Increasing it gives you // more log information // levels are currently unused: #define JITDUMP(level,...) 
(); void JitLogEE(unsigned level, const char* fmt, ...); bool compDebugBreak; bool compJitHaltMethod(); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX GS Security checks for unsafe buffers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ public: struct ShadowParamVarInfo { FixedBitVect* assignGroup; // the closure set of variables whose values depend on each other unsigned shadowCopy; // Lcl var num, if not valid set to BAD_VAR_NUM static bool mayNeedShadowCopy(LclVarDsc* varDsc) { #if defined(TARGET_AMD64) // GS cookie logic to create shadow slots, create trees to copy reg args to shadow // slots and update all trees to refer to shadow slots is done immediately after // fgMorph(). Lsra could potentially mark a param as DoNotEnregister after JIT determines // not to shadow a parameter. Also, LSRA could potentially spill a param which is passed // in register. Therefore, conservatively all params may need a shadow copy. Note that // GS cookie logic further checks whether the param is a ptr or an unsafe buffer before // creating a shadow slot even though this routine returns true. // // TODO-AMD64-CQ: Revisit this conservative approach as it could create more shadow slots than // required. There are two cases under which a reg arg could potentially be used from its // home location: // a) LSRA marks it as DoNotEnregister (see LinearScan::identifyCandidates()) // b) LSRA spills it // // Possible solution to address case (a) // - The conditions under which LSRA marks a varDsc as DoNotEnregister could be checked // in this routine. Note that 'live out of exception handler' is something we may not be // able to check here, since GS cookie logic is invoked ahead of liveness computation. // Therefore, for methods with exception handling that need the GS cookie check, we might have // to take the conservative approach. // // Possible solution to address case (b) // - Whenever a parameter passed in an argument register needs to be spilled by LSRA, we // create a new spill temp if the method needs GS cookie check. return varDsc->lvIsParam; #else // !defined(TARGET_AMD64) return varDsc->lvIsParam && !varDsc->lvIsRegArg; #endif } #ifdef DEBUG void Print() { printf("assignGroup [%p]; shadowCopy: [%d];\n", assignGroup, shadowCopy); } #endif }; GSCookie* gsGlobalSecurityCookieAddr; // Address of global cookie for unsafe buffer checks GSCookie gsGlobalSecurityCookieVal; // Value of global cookie if addr is NULL ShadowParamVarInfo* gsShadowVarInfo; // Table used by shadow param analysis code void gsGSChecksInitCookie(); // Grabs cookie variable void gsCopyShadowParams(); // Identify vulnerable params and create shadow copies bool gsFindVulnerableParams(); // Shadow param analysis code void gsParamsToShadows(); // Insert copy code and replace param uses by shadows static fgWalkPreFn gsMarkPtrsAndAssignGroups; // Shadow param analysis tree-walk static fgWalkPreFn gsReplaceShadowParams; // Shadow param replacement tree-walk #define DEFAULT_MAX_INLINE_SIZE 100 // Methods with > DEFAULT_MAX_INLINE_SIZE IL bytes will never be inlined. // This can be overridden by setting complus_JITInlineSize env variable. 
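// A worked example, assuming the limit is read from the complus_JITInlineSize variable mentioned above:
// with the default of 100, a 150-IL-byte method fails the inliner's size screen; raising the cap in the
// environment before launching the process, e.g.
//
//     set COMPlus_JITInlineSize=200    // hypothetical session; allows candidates up to 200 IL bytes
//
// makes the same method eligible again (subject to the inliner's other checks).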
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined #define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers private: #ifdef FEATURE_JIT_METHOD_PERF JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation. static CompTimeSummaryInfo s_compJitTimerSummary; // Summary of the Timer information for the whole run. static LPCWSTR JitTimeLogCsv(); // Retrieve the file name for CSV from ConfigDWORD. static LPCWSTR compJitTimeLogFilename; // If a log file for JIT time is desired, filename to write it to. #endif void BeginPhase(Phases phase); // Indicate the start of the given phase. void EndPhase(Phases phase); // Indicate the end of the given phase. #if MEASURE_CLRAPI_CALLS // Thin wrappers that call into JitTimer (if present). inline void CLRApiCallEnter(unsigned apix); inline void CLRApiCallLeave(unsigned apix); public: inline void CLR_API_Enter(API_ICorJitInfo_Names ename); inline void CLR_API_Leave(API_ICorJitInfo_Names ename); private: #endif #if defined(DEBUG) || defined(INLINE_DATA) // These variables are associated with maintaining SQM data about compile time. unsigned __int64 m_compCyclesAtEndOfInlining; // The thread-virtualized cycle count at the end of the inlining phase // in the current compilation. unsigned __int64 m_compCycles; // Net cycle count for current compilation DWORD m_compTickCountAtEndOfInlining; // The result of GetTickCount() (# ms since some epoch marker) at the end of // the inlining phase in the current compilation. #endif // defined(DEBUG) || defined(INLINE_DATA) // Records the SQM-relevant (cycles and tick count). Should be called after inlining is complete. // (We do this after inlining because this marks the last point at which the JIT is likely to cause // type-loading and class initialization). void RecordStateAtEndOfInlining(); // Assumes being called at the end of compilation. Update the SQM state. void RecordStateAtEndOfCompilation(); public: #if FUNC_INFO_LOGGING static LPCWSTR compJitFuncInfoFilename; // If a log file for per-function information is required, this is the // filename to write it to. static FILE* compJitFuncInfoFile; // And this is the actual FILE* to write to. #endif // FUNC_INFO_LOGGING Compiler* prevCompiler; // Previous compiler on stack for TLS Compiler* linked list for reentrant compilers. #if MEASURE_NOWAY void RecordNowayAssert(const char* filename, unsigned line, const char* condStr); #endif // MEASURE_NOWAY #ifndef FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(); #else // FEATURE_TRACELOGGING // Should we actually fire the noway assert body and the exception handler? bool compShouldThrowOnNoway(const char* filename, unsigned line); // Telemetry instance to use per method compilation. JitTelemetry compJitTelemetry; // Get common parameters that have to be logged with most telemetry data. void compGetTelemetryDefaults(const char** assemblyName, const char** scopeName, const char** methodName, unsigned* methodHash); #endif // !FEATURE_TRACELOGGING #ifdef DEBUG private: NodeToTestDataMap* m_nodeTestData; static const unsigned FIRST_LOOP_HOIST_CSE_CLASS = 1000; unsigned m_loopHoistCSEClass; // LoopHoist test annotations turn into CSE requirements; we // label them with CSE Class #'s starting at FIRST_LOOP_HOIST_CSE_CLASS. // Current kept in this. 
public: NodeToTestDataMap* GetNodeTestData() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_nodeTestData == nullptr) { compRoot->m_nodeTestData = new (getAllocatorDebugOnly()) NodeToTestDataMap(getAllocatorDebugOnly()); } return compRoot->m_nodeTestData; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap; // Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and // currently occur in the AST graph. NodeToIntMap* FindReachableNodesInNodeTestData(); // Node "from" is being eliminated, and being replaced by node "to". If "from" had any associated // test data, associate that data with "to". void TransferTestDataToNode(GenTree* from, GenTree* to); // These are the methods that test that the various conditions implied by the // test attributes are satisfied. void JitTestCheckSSA(); // SSA builder tests. void JitTestCheckVN(); // Value numbering tests. #endif // DEBUG // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for // operations. FieldSeqStore* m_fieldSeqStore; FieldSeqStore* GetFieldSeqStore() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_fieldSeqStore == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_FieldSeqStore)); compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc); } return compRoot->m_fieldSeqStore; } typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap; // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to // attach the field sequence directly to the address node. NodeToFieldSeqMap* m_zeroOffsetFieldMap; NodeToFieldSeqMap* GetZeroOffsetFieldMap() { // Don't need to worry about inlining here if (m_zeroOffsetFieldMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for // allocation. CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap)); m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc); } return m_zeroOffsetFieldMap; } // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in // "fieldSeq", whose offsets are all required to be zero. Ensures that any field sequence annotation currently on // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has // a field sequence as a member; otherwise, it may be the addition of a byref and a constant, where the const // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we // record the field sequence using the ZeroOffsetFieldMap described above. // // One exception to the above is when "op1" is a node of type "TYP_REF" and a GT_LCL_VAR. // This happens when the System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in // CoreRT. Such a case is handled the same as the default case. 
void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq); typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap; NodeToArrayInfoMap* m_arrayInfoMap; NodeToArrayInfoMap* GetArrayInfoMap() { Compiler* compRoot = impInlineRoot(); if (compRoot->m_arrayInfoMap == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc); } return compRoot->m_arrayInfoMap; } //----------------------------------------------------------------------------------------------------------------- // Compiler::TryGetArrayInfo: // Given an indirection node, checks to see whether or not that indirection represents an array access, and // if so returns information about the array. // // Arguments: // indir - The `GT_IND` node. // arrayInfo (out) - Information about the accessed array if this function returns true. Undefined otherwise. // // Returns: // True if the `GT_IND` node represents an array access; false otherwise. bool TryGetArrayInfo(GenTreeIndir* indir, ArrayInfo* arrayInfo) { if ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0) { return false; } if (indir->gtOp1->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = indir->gtOp1->AsIndexAddr(); *arrayInfo = ArrayInfo(indexAddr->gtElemType, indexAddr->gtElemSize, indexAddr->gtElemOffset, indexAddr->gtStructElemClass); return true; } bool found = GetArrayInfoMap()->Lookup(indir, arrayInfo); assert(found); return true; } NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount]; // In some cases, we want to assign intermediate SSA #'s to memory states, and know what nodes create those memory // states. (We do this for try blocks, where, if the try block doesn't do a call that loses track of the memory // state, all the possible memory states are possible initial states of the corresponding catch block(s).) NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind) { if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates) { // Use the same map for GCHeap and ByrefExposed when their states match. memoryKind = ByrefExposed; } assert(memoryKind < MemoryKindCount); Compiler* compRoot = impInlineRoot(); if (compRoot->m_memorySsaMap[memoryKind] == nullptr) { // Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation. CompAllocator ialloc(getAllocator(CMK_ArrayInfoMap)); compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc); } return compRoot->m_memorySsaMap[memoryKind]; } // The Refany type is the only struct type whose structure is implicitly assumed by IL. We need its fields. 
CORINFO_CLASS_HANDLE m_refAnyClass; CORINFO_FIELD_HANDLE GetRefanyDataField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 0); } CORINFO_FIELD_HANDLE GetRefanyTypeField() { if (m_refAnyClass == nullptr) { m_refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); } return info.compCompHnd->getFieldInClass(m_refAnyClass, 1); } #if VARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_varsetOpCounter; #endif #if ALLVARSET_COUNTOPS static BitSetSupport::BitSetOpCounter m_allvarsetOpCounter; #endif static HelperCallProperties s_helperCallProperties; #ifdef UNIX_AMD64_ABI static var_types GetTypeFromClassificationAndSizes(SystemVClassificationType classType, int size); static var_types GetEightByteType(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, unsigned slotNum); static void GetStructTypeOffset(const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR& structDesc, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); void GetStructTypeOffset(CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, unsigned __int8* offset0, unsigned __int8* offset1); #endif // defined(UNIX_AMD64_ABI) void fgMorphMultiregStructArgs(GenTreeCall* call); GenTree* fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr); bool killGCRefs(GenTree* tree); }; // end of class Compiler //--------------------------------------------------------------------------------------------------------------------- // GenTreeVisitor: a flexible tree walker implemented using the curiously-recurring-template pattern. // // This class implements a configurable walker for IR trees. There are five configuration options (default values are // shown in parentheses): // // - ComputeStack (false): when true, the walker will push each node onto the `m_ancestors` stack. "Ancestors" is a bit // of a misnomer, as the first entry will always be the current node. // // - DoPreOrder (false): when true, the walker will invoke `TVisitor::PreOrderVisit` with the current node as an // argument before visiting the node's operands. // // - DoPostOrder (false): when true, the walker will invoke `TVisitor::PostOrderVisit` with the current node as an // argument after visiting the node's operands. // // - DoLclVarsOnly (false): when true, the walker will only invoke `TVisitor::PreOrderVisit` for lclVar nodes. // `DoPreOrder` must be true if this option is true. // // - UseExecutionOrder (false): when true, the walker will visit a node's operands in execution order (e.g. if a // binary operator has the `GTF_REVERSE_OPS` flag set, the second operand will be // visited before the first). // // At least one of `DoPreOrder` and `DoPostOrder` must be specified. 
// // A simple pre-order visitor might look something like the following: // // class CountingVisitor final : public GenTreeVisitor<CountingVisitor> // { // public: // enum // { // DoPreOrder = true // }; // // unsigned m_count; // // CountingVisitor(Compiler* compiler) // : GenTreeVisitor<CountingVisitor>(compiler), m_count(0) // { // } // // Compiler::fgWalkResult PreOrderVisit(GenTree* node) // { // m_count++; // } // }; // // This visitor would then be used like so: // // CountingVisitor countingVisitor(compiler); // countingVisitor.WalkTree(root); // template <typename TVisitor> class GenTreeVisitor { protected: typedef Compiler::fgWalkResult fgWalkResult; enum { ComputeStack = false, DoPreOrder = false, DoPostOrder = false, DoLclVarsOnly = false, UseExecutionOrder = false, }; Compiler* m_compiler; ArrayStack<GenTree*> m_ancestors; GenTreeVisitor(Compiler* compiler) : m_compiler(compiler), m_ancestors(compiler->getAllocator(CMK_ArrayStack)) { assert(compiler != nullptr); static_assert_no_msg(TVisitor::DoPreOrder || TVisitor::DoPostOrder); static_assert_no_msg(!TVisitor::DoLclVarsOnly || TVisitor::DoPreOrder); } fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { return fgWalkResult::WALK_CONTINUE; } public: fgWalkResult WalkTree(GenTree** use, GenTree* user) { assert(use != nullptr); GenTree* node = *use; if (TVisitor::ComputeStack) { m_ancestors.Push(node); } fgWalkResult result = fgWalkResult::WALK_CONTINUE; if (TVisitor::DoPreOrder && !TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } node = *use; if ((node == nullptr) || (result == fgWalkResult::WALK_SKIP_SUBTREES)) { goto DONE; } } switch (node->OperGet()) { // Leaf lclVars case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Leaf nodes case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: break; // Lclvar unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: if (TVisitor::DoLclVarsOnly) { result = reinterpret_cast<TVisitor*>(this)->PreOrderVisit(use, user); if (result == fgWalkResult::WALK_ABORT) { return result; } } FALLTHROUGH; // Standard unary operators case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: case GT_RUNTIMELOOKUP: case GT_KEEPALIVE: case GT_INC_SATURATE: { GenTreeUnOp* const unOp = 
node->AsUnOp(); if (unOp->gtOp1 != nullptr) { result = WalkTree(&unOp->gtOp1, unOp); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } // Special nodes case GT_PHI: for (GenTreePhi::Use& use : node->AsPhi()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_FIELD_LIST: for (GenTreeFieldList::Use& use : node->AsFieldList()->Uses()) { result = WalkTree(&use.NodeRef(), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = node->AsCmpXchg(); result = WalkTree(&cmpXchg->gtOpLocation, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpValue, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&cmpXchg->gtOpComparand, cmpXchg); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = node->AsArrElem(); result = WalkTree(&arrElem->gtArrObj, arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } const unsigned rank = arrElem->gtArrRank; for (unsigned dim = 0; dim < rank; dim++) { result = WalkTree(&arrElem->gtArrInds[dim], arrElem); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = node->AsArrOffs(); result = WalkTree(&arrOffs->gtOffset, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtIndex, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&arrOffs->gtArrObj, arrOffs); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = node->AsStoreDynBlk(); GenTree** op1Use = &dynBlock->gtOp1; GenTree** op2Use = &dynBlock->gtOp2; GenTree** op3Use = &dynBlock->gtDynamicSize; result = WalkTree(op1Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op2Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(op3Use, dynBlock); if (result == fgWalkResult::WALK_ABORT) { return result; } break; } case GT_CALL: { GenTreeCall* const call = node->AsCall(); if (call->gtCallThisArg != nullptr) { result = WalkTree(&call->gtCallThisArg->NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->Args()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } for (GenTreeCall::Use& use : call->LateArgs()) { result = WalkTree(&use.NodeRef(), call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtCallType == CT_INDIRECT) { if (call->gtCallCookie != nullptr) { result = WalkTree(&call->gtCallCookie, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } result = WalkTree(&call->gtCallAddr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (call->gtControlExpr != nullptr) { result = WalkTree(&call->gtControlExpr, call); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { assert(node->AsMultiOp()->GetOperandCount() == 2); result = WalkTree(&node->AsMultiOp()->Op(2), node); if (result == 
fgWalkResult::WALK_ABORT) { return result; } result = WalkTree(&node->AsMultiOp()->Op(1), node); if (result == fgWalkResult::WALK_ABORT) { return result; } } else { for (GenTree** use : node->AsMultiOp()->UseEdges()) { result = WalkTree(use, node); if (result == fgWalkResult::WALK_ABORT) { return result; } } } break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Binary nodes default: { assert(node->OperIsBinary()); GenTreeOp* const op = node->AsOp(); GenTree** op1Use = &op->gtOp1; GenTree** op2Use = &op->gtOp2; if (TVisitor::UseExecutionOrder && node->IsReverseOp()) { std::swap(op1Use, op2Use); } if (*op1Use != nullptr) { result = WalkTree(op1Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } if (*op2Use != nullptr) { result = WalkTree(op2Use, op); if (result == fgWalkResult::WALK_ABORT) { return result; } } break; } } DONE: // Finally, visit the current node if (TVisitor::DoPostOrder) { result = reinterpret_cast<TVisitor*>(this)->PostOrderVisit(use, user); } if (TVisitor::ComputeStack) { m_ancestors.Pop(); } return result; } }; template <bool computeStack, bool doPreOrder, bool doPostOrder, bool doLclVarsOnly, bool useExecutionOrder> class GenericTreeWalker final : public GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>> { public: enum { ComputeStack = computeStack, DoPreOrder = doPreOrder, DoPostOrder = doPostOrder, DoLclVarsOnly = doLclVarsOnly, UseExecutionOrder = useExecutionOrder, }; private: Compiler::fgWalkData* m_walkData; public: GenericTreeWalker(Compiler::fgWalkData* walkData) : GenTreeVisitor<GenericTreeWalker<computeStack, doPreOrder, doPostOrder, doLclVarsOnly, useExecutionOrder>>( walkData->compiler) , m_walkData(walkData) { assert(walkData != nullptr); if (computeStack) { walkData->parentStack = &this->m_ancestors; } } Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtprVisitorFn(use, m_walkData); } Compiler::fgWalkResult PostOrderVisit(GenTree** use, GenTree* user) { m_walkData->parent = user; return m_walkData->wtpoVisitorFn(use, m_walkData); } }; // A dominator tree visitor implemented using the curiously-recurring-template pattern, similar to GenTreeVisitor. template <typename TVisitor> class DomTreeVisitor { protected: Compiler* const m_compiler; DomTreeNode* const m_domTree; DomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : m_compiler(compiler), m_domTree(domTree) { } void Begin() { } void PreOrderVisit(BasicBlock* block) { } void PostOrderVisit(BasicBlock* block) { } void End() { } public: //------------------------------------------------------------------------ // WalkTree: Walk the dominator tree, starting from fgFirstBB. // // Notes: // This performs a non-recursive, non-allocating walk of the tree by using // DomTreeNode's firstChild and nextSibling links to locate the children of // a node and BasicBlock's bbIDom parent link to go back up the tree when // no more children are left. // // Forests are also supported, provided that all the roots are chained via // DomTreeNode::nextSibling to fgFirstBB. 
// void WalkTree() { static_cast<TVisitor*>(this)->Begin(); for (BasicBlock *next, *block = m_compiler->fgFirstBB; block != nullptr; block = next) { static_cast<TVisitor*>(this)->PreOrderVisit(block); next = m_domTree[block->bbNum].firstChild; if (next != nullptr) { assert(next->bbIDom == block); continue; } do { static_cast<TVisitor*>(this)->PostOrderVisit(block); next = m_domTree[block->bbNum].nextSibling; if (next != nullptr) { assert(next->bbIDom == block->bbIDom); break; } block = block->bbIDom; } while (block != nullptr); } static_cast<TVisitor*>(this)->End(); } }; // EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.: // for (EHblkDsc* const ehDsc : EHClauses(compiler)) // class EHClauses { EHblkDsc* m_begin; EHblkDsc* m_end; // Forward iterator for the exception handling table entries. Iteration is in table order. // class iterator { EHblkDsc* m_ehDsc; public: iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc) { } EHblkDsc* operator*() const { return m_ehDsc; } iterator& operator++() { ++m_ehDsc; return *this; } bool operator!=(const iterator& i) const { return m_ehDsc != i.m_ehDsc; } }; public: EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount) { assert((m_begin != nullptr) || (m_begin == m_end)); } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(m_end); } }; /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous Compiler stuff XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ // Values used to mark the types a stack slot is used for const unsigned TYPE_REF_INT = 0x01; // slot used as a 32-bit int const unsigned TYPE_REF_LNG = 0x02; // slot used as a 64-bit long const unsigned TYPE_REF_FLT = 0x04; // slot used as a 32-bit float const unsigned TYPE_REF_DBL = 0x08; // slot used as a 64-bit float const unsigned TYPE_REF_PTR = 0x10; // slot used as a 32-bit pointer const unsigned TYPE_REF_BYR = 0x20; // slot used as a byref pointer const unsigned TYPE_REF_STC = 0x40; // slot used as a struct const unsigned TYPE_REF_TYPEMASK = 0x7F; // bits that represent the type // const unsigned TYPE_REF_ADDR_TAKEN = 0x80; // slots address was taken /***************************************************************************** * * Variables to keep track of total code amounts. */ #if DISPLAY_SIZES extern size_t grossVMsize; extern size_t grossNCsize; extern size_t totalNCsize; extern unsigned genMethodICnt; extern unsigned genMethodNCnt; extern size_t gcHeaderISize; extern size_t gcPtrMapISize; extern size_t gcHeaderNSize; extern size_t gcPtrMapNSize; #endif // DISPLAY_SIZES /***************************************************************************** * * Variables to keep track of basic block counts (more data on 1 BB methods) */ #if COUNT_BASIC_BLOCKS extern Histogram bbCntTable; extern Histogram bbOneBBSizeTable; #endif /***************************************************************************** * * Used by optFindNaturalLoops to gather statistical information such as * - total number of natural loops * - number of loops with 1, 2, ... 
exit conditions * - number of loops that have an iterator (for like) * - number of loops that have a constant iterator */ #if COUNT_LOOPS extern unsigned totalLoopMethods; // counts the total number of methods that have natural loops extern unsigned maxLoopsPerMethod; // counts the maximum number of loops a method has extern unsigned totalLoopOverflows; // # of methods that identified more loops than we can represent extern unsigned totalLoopCount; // counts the total number of natural loops extern unsigned totalUnnatLoopCount; // counts the total number of (not-necessarily natural) loops extern unsigned totalUnnatLoopOverflows; // # of methods that identified more unnatural loops than we can represent extern unsigned iterLoopCount; // counts the # of loops with an iterator (for like) extern unsigned simpleTestLoopCount; // counts the # of loops with an iterator and a simple loop condition (iter < // const) extern unsigned constIterLoopCount; // counts the # of loops with a constant iterator (for like) extern bool hasMethodLoops; // flag to keep track if we already counted a method as having loops extern unsigned loopsThisMethod; // counts the number of loops in the current method extern bool loopOverflowThisMethod; // True if we exceeded the max # of loops in the method. extern Histogram loopCountTable; // Histogram of loop counts extern Histogram loopExitCountTable; // Histogram of loop exit counts #endif // COUNT_LOOPS /***************************************************************************** * variables to keep track of how many iterations we go in a dataflow pass */ #if DATAFLOW_ITER extern unsigned CSEiterCount; // counts the # of iteration for the CSE dataflow extern unsigned CFiterCount; // counts the # of iteration for the Const Folding dataflow #endif // DATAFLOW_ITER #if MEASURE_BLOCK_SIZE extern size_t genFlowNodeSize; extern size_t genFlowNodeCnt; #endif // MEASURE_BLOCK_SIZE #if MEASURE_NODE_SIZE struct NodeSizeStats { void Init() { genTreeNodeCnt = 0; genTreeNodeSize = 0; genTreeNodeActualSize = 0; } // Count of tree nodes allocated. unsigned __int64 genTreeNodeCnt; // The size we allocate. unsigned __int64 genTreeNodeSize; // The actual size of the node. Note that the actual size will likely be smaller // than the allocated size, but we sometimes use SetOper()/ChangeOper() to change // a smaller node to a larger one. TODO-Cleanup: add stats on // SetOper()/ChangeOper() usage to quantify this. unsigned __int64 genTreeNodeActualSize; }; extern NodeSizeStats genNodeSizeStats; // Total node size stats extern NodeSizeStats genNodeSizeStatsPerFunc; // Per-function node size stats extern Histogram genTreeNcntHist; extern Histogram genTreeNsizHist; #endif // MEASURE_NODE_SIZE /***************************************************************************** * Count fatal errors (including noway_asserts). 
*/ #if MEASURE_FATAL extern unsigned fatal_badCode; extern unsigned fatal_noWay; extern unsigned fatal_implLimitation; extern unsigned fatal_NOMEM; extern unsigned fatal_noWayAssertBody; #ifdef DEBUG extern unsigned fatal_noWayAssertBodyArgs; #endif // DEBUG extern unsigned fatal_NYI; #endif // MEASURE_FATAL /***************************************************************************** * Codegen */ #ifdef TARGET_XARCH const instruction INS_SHIFT_LEFT_LOGICAL = INS_shl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_shr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_sar; const instruction INS_AND = INS_and; const instruction INS_OR = INS_or; const instruction INS_XOR = INS_xor; const instruction INS_NEG = INS_neg; const instruction INS_TEST = INS_test; const instruction INS_MUL = INS_imul; const instruction INS_SIGNED_DIVIDE = INS_idiv; const instruction INS_UNSIGNED_DIVIDE = INS_div; const instruction INS_BREAKPOINT = INS_int3; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbb; const instruction INS_NOT = INS_not; #endif // TARGET_XARCH #ifdef TARGET_ARM const instruction INS_SHIFT_LEFT_LOGICAL = INS_lsl; const instruction INS_SHIFT_RIGHT_LOGICAL = INS_lsr; const instruction INS_SHIFT_RIGHT_ARITHM = INS_asr; const instruction INS_AND = INS_and; const instruction INS_OR = INS_orr; const instruction INS_XOR = INS_eor; const instruction INS_NEG = INS_rsb; const instruction INS_TEST = INS_tst; const instruction INS_MUL = INS_mul; const instruction INS_MULADD = INS_mla; const instruction INS_SIGNED_DIVIDE = INS_sdiv; const instruction INS_UNSIGNED_DIVIDE = INS_udiv; const instruction INS_BREAKPOINT = INS_bkpt; const instruction INS_ADDC = INS_adc; const instruction INS_SUBC = INS_sbc; const instruction INS_NOT = INS_mvn; const instruction INS_ABS = INS_vabs; const instruction INS_SQRT = INS_vsqrt; #endif // TARGET_ARM #ifdef TARGET_ARM64 const instruction INS_MULADD = INS_madd; inline const instruction INS_BREAKPOINT_osHelper() { // GDB needs the encoding of brk #0 // Windbg needs the encoding of brk #F000 return TargetOS::IsUnix ? INS_brk_unix : INS_brk_windows; } #define INS_BREAKPOINT INS_BREAKPOINT_osHelper() const instruction INS_ABS = INS_fabs; const instruction INS_SQRT = INS_fsqrt; #endif // TARGET_ARM64 /*****************************************************************************/ extern const BYTE genTypeSizes[]; extern const BYTE genTypeAlignments[]; extern const BYTE genTypeStSzs[]; extern const BYTE genActualTypes[]; /*****************************************************************************/ #ifdef DEBUG void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars); #endif // DEBUG #include "compiler.hpp" // All the shared inline functions /*****************************************************************************/ #endif //_COMPILER_H_ /*****************************************************************************/
1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/coreclr/jit/compiler.hpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Inline functions XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef _COMPILER_HPP_ #define _COMPILER_HPP_ #include "emit.h" // for emitter::emitAddLabel #include "bitvec.h" #include "compilerbitsettraits.hpp" /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous utility functions. Some of these are defined in Utils.cpp XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /*****************************************************************************/ inline bool getInlinePInvokeEnabled() { #ifdef DEBUG return JitConfig.JitPInvokeEnabled() && !JitConfig.StressCOMCall(); #else return true; #endif } inline bool getInlinePInvokeCheckEnabled() { #ifdef DEBUG return JitConfig.JitPInvokeCheckEnabled() != 0; #else return false; #endif } // Enforce float narrowing for buggy compilers (notably preWhidbey VC) inline float forceCastToFloat(double d) { Volatile<float> f = (float)d; return f; } // Enforce UInt32 narrowing for buggy compilers (notably Whidbey Beta 2 LKG) inline UINT32 forceCastToUInt32(double d) { Volatile<UINT32> u = (UINT32)d; return u; } enum RoundLevel { ROUND_NEVER = 0, // Never round ROUND_CMP_CONST = 1, // Round values compared against constants ROUND_CMP = 2, // Round comparands and return values ROUND_ALWAYS = 3, // Round always COUNT_ROUND_LEVEL, DEFAULT_ROUND_LEVEL = ROUND_NEVER }; inline RoundLevel getRoundFloatLevel() { #ifdef DEBUG return (RoundLevel)JitConfig.JitRoundFloat(); #else return DEFAULT_ROUND_LEVEL; #endif } /*****************************************************************************/ /***************************************************************************** * * Return the lowest bit that is set */ template <typename T> inline T genFindLowestBit(T value) { return (value & (0 - value)); } /*****************************************************************************/ /***************************************************************************** * * Return the highest bit that is set (that is, a mask that includes just the highest bit). * TODO-ARM64-Throughput: we should convert these to use the _BitScanReverse() / _BitScanReverse64() * compiler intrinsics, but our CRT header file intrin.h doesn't define these for ARM64 yet. 
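 * For example, genFindHighestBit(0x28) returns 0x20: the input 0b101000 is reduced to a mask holding only its most significant set bit.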
*/ inline unsigned int genFindHighestBit(unsigned int mask) { assert(mask != 0); unsigned int bit = 1U << ((sizeof(unsigned int) * 8) - 1); // start looking at the top while ((bit & mask) == 0) { bit >>= 1; } return bit; } inline unsigned __int64 genFindHighestBit(unsigned __int64 mask) { assert(mask != 0); unsigned __int64 bit = 1ULL << ((sizeof(unsigned __int64) * 8) - 1); // start looking at the top while ((bit & mask) == 0) { bit >>= 1; } return bit; } #if 0 // TODO-ARM64-Cleanup: These should probably be the implementation, when intrin.h is updated for ARM64 inline unsigned int genFindHighestBit(unsigned int mask) { assert(mask != 0); unsigned int index; _BitScanReverse(&index, mask); return 1L << index; } inline unsigned __int64 genFindHighestBit(unsigned __int64 mask) { assert(mask != 0); unsigned int index; _BitScanReverse64(&index, mask); return 1LL << index; } #endif // 0 /***************************************************************************** * * Return true if the given 64-bit value has exactly zero or one bits set. */ template <typename T> inline bool genMaxOneBit(T value) { return (value & (value - 1)) == 0; } /***************************************************************************** * * Return true if the given 32-bit value has exactly zero or one bits set. */ inline bool genMaxOneBit(unsigned value) { return (value & (value - 1)) == 0; } /***************************************************************************** * * Return true if the given 64-bit value has exactly one bit set. */ template <typename T> inline bool genExactlyOneBit(T value) { return ((value != 0) && genMaxOneBit(value)); } /***************************************************************************** * * Return true if the given 32-bit value has exactly one bit set. */ inline bool genExactlyOneBit(unsigned value) { return ((value != 0) && genMaxOneBit(value)); } /***************************************************************************** * * Given a value that has exactly one bit set, return the position of that * bit, in other words return the logarithm in base 2 of the given value. */ inline unsigned genLog2(unsigned value) { return BitPosition(value); } // Given an unsigned 64-bit value, returns the lower 32-bits in unsigned format // inline unsigned ulo32(unsigned __int64 value) { return static_cast<unsigned>(value); } // Given an unsigned 64-bit value, returns the upper 32-bits in unsigned format // inline unsigned uhi32(unsigned __int64 value) { return static_cast<unsigned>(value >> 32); } /***************************************************************************** * * Given a value that has exactly one bit set, return the position of that * bit, in other words return the logarithm in base 2 of the given value. */ inline unsigned genLog2(unsigned __int64 value) { unsigned lo32 = ulo32(value); unsigned hi32 = uhi32(value); if (lo32 != 0) { assert(hi32 == 0); return genLog2(lo32); } else { return genLog2(hi32) + 32; } } /***************************************************************************** * * Return the lowest bit that is set in the given register mask. */ inline regMaskTP genFindLowestReg(regMaskTP value) { return (regMaskTP)genFindLowestBit(value); } /***************************************************************************** * * A rather simple routine that counts the number of bits in a given number. 
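 * For example, genCountBits(0x29) returns 3: each iteration of the loop below clears the lowest remaining set bit of 0b101001 until none are left.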
*/ template <typename T> inline unsigned genCountBits(T bits) { unsigned cnt = 0; while (bits) { cnt++; bits -= genFindLowestBit(bits); } return cnt; } /***************************************************************************** * * Given 3 masks value, end, start, returns the bits of value between start * and end (exclusive). * * value[bitNum(end) - 1, bitNum(start) + 1] */ inline unsigned __int64 BitsBetween(unsigned __int64 value, unsigned __int64 end, unsigned __int64 start) { assert(start != 0); assert(start < end); assert((start & (start - 1)) == 0); assert((end & (end - 1)) == 0); return value & ~((start - 1) | start) & // Ones to the left of set bit in the start mask. (end - 1); // Ones to the right of set bit in the end mask. } /*****************************************************************************/ inline bool jitIsScaleIndexMul(size_t val) { switch (val) { case 1: case 2: case 4: case 8: return true; default: return false; } } // Returns "true" iff "val" is a valid addressing mode scale shift amount on // the target architecture. inline bool jitIsScaleIndexShift(ssize_t val) { // It happens that this is the right test for all our current targets: x86, x64 and ARM. // This test would become target-dependent if we added a new target with a different constraint. return 0 < val && val < 4; } /***************************************************************************** * Returns true if value is between [start..end). * The comparison is inclusive of start, exclusive of end. */ /* static */ inline bool Compiler::jitIsBetween(unsigned value, unsigned start, unsigned end) { return start <= value && value < end; } /***************************************************************************** * Returns true if value is between [start..end]. * The comparison is inclusive of both start and end. */ /* static */ inline bool Compiler::jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end) { return start <= value && value <= end; } /****************************************************************************** * Return the EH descriptor for the given region index. */ inline EHblkDsc* Compiler::ehGetDsc(unsigned regionIndex) { assert(regionIndex < compHndBBtabCount); return &compHndBBtab[regionIndex]; } /****************************************************************************** * Return the EH descriptor index of the enclosing try, for the given region index. */ inline unsigned Compiler::ehGetEnclosingTryIndex(unsigned regionIndex) { return ehGetDsc(regionIndex)->ebdEnclosingTryIndex; } /****************************************************************************** * Return the EH descriptor index of the enclosing handler, for the given region index. */ inline unsigned Compiler::ehGetEnclosingHndIndex(unsigned regionIndex) { return ehGetDsc(regionIndex)->ebdEnclosingHndIndex; } /****************************************************************************** * Return the EH index given a region descriptor. */ inline unsigned Compiler::ehGetIndex(EHblkDsc* ehDsc) { assert(compHndBBtab <= ehDsc && ehDsc < compHndBBtab + compHndBBtabCount); return (unsigned)(ehDsc - compHndBBtab); } /****************************************************************************************** * Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of * (or nullptr if this block is not in a 'try' region). 
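 * A typical caller pattern (sketch using the accessors above): EHblkDsc* tryDsc = ehGetBlockTryDsc(block); if (tryDsc != nullptr) { unsigned tryIndex = ehGetIndex(tryDsc); ... }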
*/ inline EHblkDsc* Compiler::ehGetBlockTryDsc(BasicBlock* block) { if (!block->hasTryIndex()) { return nullptr; } return ehGetDsc(block->getTryIndex()); } /****************************************************************************************** * Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of * (or nullptr if this block is not in a filter or handler region). */ inline EHblkDsc* Compiler::ehGetBlockHndDsc(BasicBlock* block) { if (!block->hasHndIndex()) { return nullptr; } return ehGetDsc(block->getHndIndex()); } #if defined(FEATURE_EH_FUNCLETS) /***************************************************************************** * Get the FuncInfoDsc for the funclet we are currently generating code for. * This is only valid during codegen. * */ inline FuncInfoDsc* Compiler::funCurrentFunc() { return funGetFunc(compCurrFuncIdx); } /***************************************************************************** * Change which funclet we are currently generating code for. * This is only valid after funclets are created. * */ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) { assert(fgFuncletsCreated); assert(FitsIn<unsigned short>(funcIdx)); noway_assert(funcIdx < compFuncInfoCount); compCurrFuncIdx = (unsigned short)funcIdx; } /***************************************************************************** * Get the FuncInfoDsc for the given funclet. * This is only valid after funclets are created. * */ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) { assert(fgFuncletsCreated); assert(funcIdx < compFuncInfoCount); return &compFuncInfos[funcIdx]; } /***************************************************************************** * Get the funcIdx for the EH funclet that begins with block. * This is only valid after funclets are created. * It is only valid for blocks marked with BBF_FUNCLET_BEG because * otherwise we would have to do a more expensive check to determine * if this should return the filter funclet or the filter handler funclet. * */ inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) { assert(fgFuncletsCreated); assert(block->bbFlags & BBF_FUNCLET_BEG); EHblkDsc* eh = ehGetDsc(block->getHndIndex()); unsigned int funcIdx = eh->ebdFuncIndex; if (eh->ebdHndBeg != block) { // If this is a filter EH clause, but we want the funclet // for the filter (not the filter handler), it is the previous one noway_assert(eh->HasFilter()); noway_assert(eh->ebdFilter == block); assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER); assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex); assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER); funcIdx--; } return funcIdx; } #else // !FEATURE_EH_FUNCLETS /***************************************************************************** * Get the FuncInfoDsc for the funclet we are currently generating code for. * This is only valid during codegen. For non-funclet platforms, this is * always the root function. * */ inline FuncInfoDsc* Compiler::funCurrentFunc() { return &compFuncInfoRoot; } /***************************************************************************** * Change which funclet we are currently generating code for. * This is only valid after funclets are created. * */ inline void Compiler::funSetCurrentFunc(unsigned funcIdx) { assert(funcIdx == 0); } /***************************************************************************** * Get the FuncInfoDsc for the given funclet. * This is only valid after funclets are created. 
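 * On non-funclet targets there is only the root function, so the index must be 0 and the root FuncInfoDsc is returned.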
* */ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) { assert(funcIdx == 0); return &compFuncInfoRoot; } /***************************************************************************** * No funclets, so always 0. * */ inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) { return 0; } #endif // !FEATURE_EH_FUNCLETS //------------------------------------------------------------------------------ // genRegNumFromMask : Maps a single register mask to a register number. // // Arguments: // mask - the register mask // // Return Value: // The number of the register contained in the mask. // // Assumptions: // The mask contains one and only one register. inline regNumber genRegNumFromMask(regMaskTP mask) { assert(mask != 0); // Must have one bit set, so can't have a mask of zero /* Convert the mask to a register number */ regNumber regNum = (regNumber)genLog2(mask); /* Make sure we got it right */ assert(genRegMask(regNum) == mask); return regNum; } /***************************************************************************** * * Return the size in bytes of the given type. */ extern const BYTE genTypeSizes[TYP_COUNT]; template <class T> inline unsigned genTypeSize(T value) { assert((unsigned)TypeGet(value) < ArrLen(genTypeSizes)); return genTypeSizes[TypeGet(value)]; } /***************************************************************************** * * Return the "stack slot count" of the given type. * returns 1 for 32-bit types and 2 for 64-bit types. */ extern const BYTE genTypeStSzs[TYP_COUNT]; template <class T> inline unsigned genTypeStSz(T value) { assert((unsigned)TypeGet(value) < ArrLen(genTypeStSzs)); return genTypeStSzs[TypeGet(value)]; } /***************************************************************************** * * Return the number of registers required to hold a value of the given type. */ /***************************************************************************** * * The following function maps a 'precise' type to an actual type as seen * by the VM (for example, 'byte' maps to 'int'). */ extern const BYTE genActualTypes[TYP_COUNT]; template <class T> inline var_types genActualType(T value) { /* Spot check to make certain the table is in synch with the enum */ assert(genActualTypes[TYP_DOUBLE] == TYP_DOUBLE); assert(genActualTypes[TYP_REF] == TYP_REF); assert((unsigned)TypeGet(value) < sizeof(genActualTypes)); return (var_types)genActualTypes[TypeGet(value)]; } /***************************************************************************** * Can this type be passed as a parameter in a register? */ inline bool isRegParamType(var_types type) { #if defined(TARGET_X86) return (type <= TYP_INT || type == TYP_REF || type == TYP_BYREF); #else // !TARGET_X86 return true; #endif // !TARGET_X86 } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) /*****************************************************************************/ // Returns true if 'type' is a struct that can be enregistered for call args // or can be returned by value in multiple registers. // if 'type' is not a struct the return value will be false. // // Arguments: // type - the basic jit var_type for the item being queried // typeClass - the handle for the struct when 'type' is TYP_STRUCT // typeSize - Out param (if non-null) is updated with the size of 'type'. 
// forReturn - this is true when we are asking about a GT_RETURN context; // this is false when we are asking about an argument context // isVarArg - whether or not this is a vararg fixed arg or variable argument // - if so on arm64 windows getArgTypeForStruct will ignore HFA // - types // callConv - the calling convention of the call // inline bool Compiler::VarTypeIsMultiByteAndCanEnreg(var_types type, CORINFO_CLASS_HANDLE typeClass, unsigned* typeSize, bool forReturn, bool isVarArg, CorInfoCallConvExtension callConv) { bool result = false; unsigned size = 0; if (varTypeIsStruct(type)) { assert(typeClass != nullptr); size = info.compCompHnd->getClassSize(typeClass); if (forReturn) { structPassingKind howToReturnStruct; type = getReturnTypeForStruct(typeClass, callConv, &howToReturnStruct, size); } else { structPassingKind howToPassStruct; type = getArgTypeForStruct(typeClass, &howToPassStruct, isVarArg, size); } if (type != TYP_UNKNOWN) { result = true; } } else { size = genTypeSize(type); } if (typeSize != nullptr) { *typeSize = size; } return result; } #endif // TARGET_AMD64 || TARGET_ARM64 /*****************************************************************************/ #ifdef DEBUG inline const char* varTypeGCstring(var_types type) { switch (type) { case TYP_REF: return "gcr"; case TYP_BYREF: return "byr"; default: return "non"; } } #endif /*****************************************************************************/ const char* varTypeName(var_types); /*****************************************************************************/ // Helpers to pull little-endian values out of a byte stream. inline unsigned __int8 getU1LittleEndian(const BYTE* ptr) { return *(UNALIGNED unsigned __int8*)ptr; } inline unsigned __int16 getU2LittleEndian(const BYTE* ptr) { return GET_UNALIGNED_VAL16(ptr); } inline unsigned __int32 getU4LittleEndian(const BYTE* ptr) { return GET_UNALIGNED_VAL32(ptr); } inline signed __int8 getI1LittleEndian(const BYTE* ptr) { return *(UNALIGNED signed __int8*)ptr; } inline signed __int16 getI2LittleEndian(const BYTE* ptr) { return GET_UNALIGNED_VAL16(ptr); } inline signed __int32 getI4LittleEndian(const BYTE* ptr) { return GET_UNALIGNED_VAL32(ptr); } inline signed __int64 getI8LittleEndian(const BYTE* ptr) { return GET_UNALIGNED_VAL64(ptr); } inline float getR4LittleEndian(const BYTE* ptr) { __int32 val = getI4LittleEndian(ptr); return *(float*)&val; } inline double getR8LittleEndian(const BYTE* ptr) { __int64 val = getI8LittleEndian(ptr); return *(double*)&val; } #ifdef DEBUG const char* genES2str(BitVecTraits* traits, EXPSET_TP set); const char* refCntWtd2str(weight_t refCntWtd); #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX GenTree XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ void* GenTree::operator new(size_t sz, Compiler* comp, genTreeOps oper) { size_t size = GenTree::s_gtNodeSizes[oper]; #if MEASURE_NODE_SIZE genNodeSizeStats.genTreeNodeCnt += 1; genNodeSizeStats.genTreeNodeSize += size; genNodeSizeStats.genTreeNodeActualSize += sz; genNodeSizeStatsPerFunc.genTreeNodeCnt += 1; genNodeSizeStatsPerFunc.genTreeNodeSize += size; genNodeSizeStatsPerFunc.genTreeNodeActualSize += sz; #endif // MEASURE_NODE_SIZE assert(size >= sz); return comp->getAllocator(CMK_ASTNode).allocate<char>(size); } // GenTree 
constructor inline GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode)) { gtOper = oper; gtType = type; gtFlags = GTF_EMPTY; gtLIRFlags = 0; #ifdef DEBUG gtDebugFlags = GTF_DEBUG_NONE; #endif // DEBUG gtCSEnum = NO_CSE; ClearAssertion(); gtNext = nullptr; gtPrev = nullptr; SetRegNum(REG_NA); INDEBUG(gtRegTag = GT_REGTAG_NONE;) INDEBUG(gtCostsInitialized = false;) #ifdef DEBUG size_t size = GenTree::s_gtNodeSizes[oper]; if (size == TREE_NODE_SZ_SMALL && !largeNode) { gtDebugFlags |= GTF_DEBUG_NODE_SMALL; } else if (size == TREE_NODE_SZ_LARGE || largeNode) { gtDebugFlags |= GTF_DEBUG_NODE_LARGE; } else { assert(!"bogus node size"); } #endif #if COUNT_AST_OPERS InterlockedIncrement(&s_gtNodeCounts[oper]); #endif #ifdef DEBUG gtSeqNum = 0; gtUseNum = -1; gtTreeID = JitTls::GetCompiler()->compGenTreeID++; gtVNPair.SetBoth(ValueNumStore::NoVN); gtRegTag = GT_REGTAG_NONE; gtOperSave = GT_NONE; #endif } /*****************************************************************************/ inline Statement* Compiler::gtNewStmt(GenTree* expr) { Statement* stmt = new (this->getAllocator(CMK_ASTNode)) Statement(expr DEBUGARG(compStatementID++)); return stmt; } inline Statement* Compiler::gtNewStmt(GenTree* expr, const DebugInfo& di) { Statement* stmt = gtNewStmt(expr); stmt->SetDebugInfo(di); return stmt; } /*****************************************************************************/ inline GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications) { assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0); assert((GenTree::OperKind(oper) & GTK_EXOP) == 0); // Can't use this to construct any types that extend unary/binary operator. assert(op1 != nullptr || oper == GT_RETFILT || oper == GT_NOP || (oper == GT_RETURN && type == TYP_VOID)); if (doSimplifications) { // We do some simplifications here. // If this gets to be too many, try a switch... // TODO-Cleanup: With the factoring out of array bounds checks, it should not be the // case that we need to check for the array index case here, but without this check // we get failures (see for example jit\Directed\Languages\Python\test_methods_d.exe) if (oper == GT_IND) { // IND(ADDR(IND(x)) == IND(x) if (op1->gtOper == GT_ADDR) { GenTreeUnOp* addr = op1->AsUnOp(); GenTree* indir = addr->gtGetOp1(); if (indir->OperIs(GT_IND) && ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0)) { op1 = indir->AsIndir()->Addr(); } } } else if (oper == GT_ADDR) { // if "x" is not an array index, ADDR(IND(x)) == x if (op1->gtOper == GT_IND && (op1->gtFlags & GTF_IND_ARR_INDEX) == 0) { return op1->AsOp()->gtOp1; } else { // Addr source can't be CSE-ed. op1->SetDoNotCSE(); } } } GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, nullptr); return node; } // Returns an opcode that is of the largest node size in use. inline genTreeOps LargeOpOpcode() { assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); return GT_CALL; } /****************************************************************************** * * Use to create nodes which may later be morphed to another (big) operator */ inline GenTree* Compiler::gtNewLargeOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2) { assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0); // Can't use this to construct any types that extend unary/binary operator. 
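// Note: even though 'oper' must be a small-node opcode (asserted below), the allocation deliberately uses LargeOpOpcode() so the node can later be morphed in place into a large node such as a GT_CALL.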
assert((GenTree::OperKind(oper) & GTK_EXOP) == 0); assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL); // Allocate a large node GenTree* node = new (this, LargeOpOpcode()) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); return node; } /***************************************************************************** * * Allocates an integer constant entry that represents a handle (something * that may need to be fixed up). */ inline GenTree* Compiler::gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields) { GenTree* node; assert((flags & (GTF_ICON_HDL_MASK | GTF_ICON_FIELD_OFF)) != 0); // Interpret "fields == NULL" as "not a field." if (fields == nullptr) { fields = FieldSeqStore::NotAField(); } #if defined(LATE_DISASM) node = new (this, LargeOpOpcode()) GenTreeIntCon(TYP_I_IMPL, value, fields DEBUGARG(/*largeNode*/ true)); #else node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, value, fields); #endif node->gtFlags |= flags; return node; } /***************************************************************************** * * It may not be allowed to embed HANDLEs directly into the JITed code (e.g., * as arguments to JIT helpers). Get a corresponding value that can be embedded. * These are versions for each specific type of HANDLE */ inline GenTree* Compiler::gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd) { void *embedScpHnd, *pEmbedScpHnd; embedScpHnd = (void*)info.compCompHnd->embedModuleHandle(scpHnd, &pEmbedScpHnd); assert((!embedScpHnd) != (!pEmbedScpHnd)); return gtNewIconEmbHndNode(embedScpHnd, pEmbedScpHnd, GTF_ICON_SCOPE_HDL, scpHnd); } //----------------------------------------------------------------------------- inline GenTree* Compiler::gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd) { void *embedClsHnd, *pEmbedClsHnd; embedClsHnd = (void*)info.compCompHnd->embedClassHandle(clsHnd, &pEmbedClsHnd); assert((!embedClsHnd) != (!pEmbedClsHnd)); return gtNewIconEmbHndNode(embedClsHnd, pEmbedClsHnd, GTF_ICON_CLASS_HDL, clsHnd); } //----------------------------------------------------------------------------- inline GenTree* Compiler::gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd) { void *embedMethHnd, *pEmbedMethHnd; embedMethHnd = (void*)info.compCompHnd->embedMethodHandle(methHnd, &pEmbedMethHnd); assert((!embedMethHnd) != (!pEmbedMethHnd)); return gtNewIconEmbHndNode(embedMethHnd, pEmbedMethHnd, GTF_ICON_METHOD_HDL, methHnd); } //----------------------------------------------------------------------------- inline GenTree* Compiler::gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd) { void *embedFldHnd, *pEmbedFldHnd; embedFldHnd = (void*)info.compCompHnd->embedFieldHandle(fldHnd, &pEmbedFldHnd); assert((!embedFldHnd) != (!pEmbedFldHnd)); return gtNewIconEmbHndNode(embedFldHnd, pEmbedFldHnd, GTF_ICON_FIELD_HDL, fldHnd); } /*****************************************************************************/ //------------------------------------------------------------------------------ // gtNewHelperCallNode : Helper to create a helper call node. // // // Arguments: // helper - Call helper // type - Type of the node // args - Call args // // Return Value: // New CT_HELPER node inline GenTreeCall* Compiler::gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args) { GenTreeFlags flags = s_helperCallProperties.NoThrow((CorInfoHelpFunc)helper) ? 
GTF_EMPTY : GTF_EXCEPT; GenTreeCall* result = gtNewCallNode(CT_HELPER, eeFindHelper(helper), type, args); result->gtFlags |= flags; #if DEBUG // Helper calls are never candidates. result->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; #endif return result; } //------------------------------------------------------------------------------ // gtNewRuntimeLookupHelperCallNode : Helper to create a runtime lookup call helper node. // // // Arguments: // helper - Call helper // type - Type of the node // args - Call args // // Return Value: // New CT_HELPER node inline GenTreeCall* Compiler::gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup, GenTree* ctxTree, void* compileTimeHandle) { GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); } //------------------------------------------------------------------------ // gtNewAllocObjNode: A little helper to create an object allocation node. // // Arguments: // helper - Value returned by ICorJitInfo::getNewHelper // helperHasSideEffects - True iff allocation helper has side effects // clsHnd - Corresponding class handle // type - Tree return type (e.g. TYP_REF) // op1 - Node containing an address of VtablePtr // // Return Value: // Returns GT_ALLOCOBJ node that will be later morphed into an // allocation helper call or local variable allocation on the stack. inline GenTreeAllocObj* Compiler::gtNewAllocObjNode( unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1) { GenTreeAllocObj* node = new (this, GT_ALLOCOBJ) GenTreeAllocObj(type, helper, helperHasSideEffects, clsHnd, op1); return node; } //------------------------------------------------------------------------ // gtNewRuntimeLookup: Helper to create a runtime lookup node // // Arguments: // hnd - generic handle being looked up // hndTyp - type of the generic handle // tree - tree for the lookup // // Return Value: // New GenTreeRuntimeLookup node. inline GenTree* Compiler::gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree) { assert(tree != nullptr); GenTree* node = new (this, GT_RUNTIMELOOKUP) GenTreeRuntimeLookup(hnd, hndTyp, tree); return node; } //------------------------------------------------------------------------ // gtNewFieldRef: a helper for creating GT_FIELD nodes. // // Normalizes struct types (for SIMD vectors). Sets GTF_GLOB_REF for fields // that may be pointing into globally visible memory. // // Arguments: // type - type for the field node // fldHnd - the field handle // obj - the instance, an address // offset - the field offset // // Return Value: // The created node. // inline GenTreeField* Compiler::gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj, DWORD offset) { // GT_FIELD nodes are transformed into GT_IND nodes. assert(GenTree::s_gtNodeSizes[GT_IND] <= GenTree::s_gtNodeSizes[GT_FIELD]); if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE structHnd; eeGetFieldType(fldHnd, &structHnd); type = impNormStructType(structHnd); } GenTreeField* fieldNode = new (this, GT_FIELD) GenTreeField(type, obj, fldHnd, offset); // If "obj" is the address of a local, note that a field of that struct local has been accessed. 
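// (For any other address the field node is conservatively flagged with GTF_GLOB_REF in the else branch below.)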
if ((obj != nullptr) && obj->OperIs(GT_ADDR) && varTypeIsStruct(obj->AsUnOp()->gtOp1) && obj->AsUnOp()->gtOp1->OperIs(GT_LCL_VAR)) { LclVarDsc* varDsc = lvaGetDesc(obj->AsUnOp()->gtOp1->AsLclVarCommon()); varDsc->lvFieldAccessed = 1; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // These structs are passed by reference and can easily become global // references if those references are exposed. We clear out // address-exposure information for these parameters when they are // converted into references in fgRetypeImplicitByRefArgs() so we do // not have the necessary information in morph to know if these // indirections are actually global references, so we have to be // conservative here. if (varDsc->lvIsParam) { fieldNode->gtFlags |= GTF_GLOB_REF; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) } else { fieldNode->gtFlags |= GTF_GLOB_REF; } return fieldNode; } /***************************************************************************** * * A little helper to create an array index node. */ inline GenTree* Compiler::gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp) { GenTreeIndex* gtIndx = new (this, GT_INDEX) GenTreeIndex(typ, arrayOp, indexOp, genTypeSize(typ)); return gtIndx; } //------------------------------------------------------------------------------ // gtNewArrLen : Helper to create an array length node. // // // Arguments: // typ - Type of the node // arrayOp - Array node // lenOffset - Offset of the length field // block - Basic block that will contain the result // // Return Value: // New GT_ARR_LENGTH node inline GenTreeArrLen* Compiler::gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block) { GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(typ, arrayOp, lenOffset); static_assert_no_msg(GTF_ARRLEN_NONFAULTING == GTF_IND_NONFAULTING); arrLen->SetIndirExceptionFlags(this); if (block != nullptr) { block->bbFlags |= BBF_HAS_IDX_LEN; } optMethodFlags |= OMF_HAS_ARRAYREF; return arrLen; } //------------------------------------------------------------------------------ // gtNewIndir : Helper to create an indirection node. // // Arguments: // typ - Type of the node // addr - Address of the indirection // // Return Value: // New GT_IND node inline GenTreeIndir* Compiler::gtNewIndir(var_types typ, GenTree* addr) { GenTree* indir = gtNewOperNode(GT_IND, typ, addr); indir->SetIndirExceptionFlags(this); return indir->AsIndir(); } //------------------------------------------------------------------------------ // gtNewNullCheck : Helper to create a null check node. // // Arguments: // addr - Address to null check // basicBlock - Basic block of the node // // Return Value: // New GT_NULLCHECK node inline GenTree* Compiler::gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock) { assert(fgAddrCouldBeNull(addr)); GenTree* nullCheck = gtNewOperNode(GT_NULLCHECK, TYP_BYTE, addr); nullCheck->gtFlags |= GTF_EXCEPT; basicBlock->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; return nullCheck; } /***************************************************************************** * * Create (and check for) a "nothing" node, i.e. a node that doesn't produce * any code. We currently use a "nop" node of type void for this purpose. 
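 * For example, gtUnusedValNode below wraps an expression as COMMA(expr, NOP), so the expression's value can be discarded without emitting any code for the discard.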
*/ inline GenTree* Compiler::gtNewNothingNode() { return new (this, GT_NOP) GenTreeOp(GT_NOP, TYP_VOID); } /*****************************************************************************/ inline bool GenTree::IsNothingNode() const { return (gtOper == GT_NOP && gtType == TYP_VOID); } /***************************************************************************** * * Change the given node to a NOP - May be later changed to a GT_COMMA * *****************************************************************************/ inline void GenTree::gtBashToNOP() { ChangeOper(GT_NOP); gtType = TYP_VOID; AsOp()->gtOp1 = AsOp()->gtOp2 = nullptr; gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); } // return new arg placeholder node. Does not do anything but has a type associated // with it so we can keep track of register arguments in lists associated w/ call nodes inline GenTree* Compiler::gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd) { GenTree* node = new (this, GT_ARGPLACE) GenTreeArgPlace(type, clsHnd); return node; } /*****************************************************************************/ inline GenTree* Compiler::gtUnusedValNode(GenTree* expr) { return gtNewOperNode(GT_COMMA, TYP_VOID, expr, gtNewNothingNode()); } /***************************************************************************** * * A wrapper for gtSetEvalOrder and gtComputeFPlvls * Necessary because the FP levels may need to be re-computed if we reverse * operands */ inline void Compiler::gtSetStmtInfo(Statement* stmt) { GenTree* expr = stmt->GetRootNode(); /* Recursively process the expression */ gtSetEvalOrder(expr); } /*****************************************************************************/ inline void Compiler::fgUpdateConstTreeValueNumber(GenTree* tree) { if (vnStore != nullptr) { fgValueNumberTreeConst(tree); } } inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate) { assert(((gtDebugFlags & GTF_DEBUG_NODE_SMALL) != 0) != ((gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0)); /* Make sure the node isn't too small for the new operator */ assert(GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_LARGE); assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_LARGE); assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE)); #if defined(HOST_64BIT) && !defined(TARGET_64BIT) if (gtOper == GT_CNS_LNG && oper == GT_CNS_INT) { // When casting from LONG to INT, we need to force cast of the value, // if the host architecture represents INT and LONG with the same data size. AsLngCon()->gtLconVal = (INT64)(INT32)AsLngCon()->gtLconVal; } #endif // defined(HOST_64BIT) && !defined(TARGET_64BIT) SetOperRaw(oper); #ifdef DEBUG // Maintain the invariant that unary operators always have NULL gtOp2. // If we ever start explicitly allocating GenTreeUnOp nodes, we wouldn't be // able to do that (but if we did, we'd have to have a check in GetOp() -- perhaps // a gtUnOp...) if (OperKind(oper) == GTK_UNOP) { AsOp()->gtOp2 = nullptr; } #endif // DEBUG #if DEBUGGABLE_GENTREE // Until we eliminate SetOper/ChangeOper, we also change the vtable of the node, so that // it shows up correctly in the debugger. SetVtableForOper(oper); #endif // DEBUGGABLE_GENTREE if (vnUpdate == CLEAR_VN) { // Clear the ValueNum field as well. gtVNPair.SetBoth(ValueNumStore::NoVN); } // Do "oper"-specific initializations. TODO-Cleanup: these are too ad-hoc to be reliable. 
// The bashing code should decide itself what to initialize and what to leave as it was. switch (oper) { case GT_CNS_INT: AsIntCon()->gtFieldSeq = FieldSeqStore::NotAField(); break; #if defined(TARGET_ARM) case GT_MUL_LONG: // We sometimes bash GT_MUL to GT_MUL_LONG, which converts it from GenTreeOp to GenTreeMultiRegOp. AsMultiRegOp()->gtOtherReg = REG_NA; AsMultiRegOp()->ClearOtherRegFlags(); break; #endif case GT_LCL_FLD: AsLclFld()->SetLclOffs(0); AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); break; default: break; } } inline GenTree* Compiler::gtNewKeepAliveNode(GenTree* op) { GenTree* keepalive = gtNewOperNode(GT_KEEPALIVE, TYP_VOID, op); // Prevent both reordering and removal. Invalid optimizations of GC.KeepAlive are // very subtle and hard to observe. Thus we are conservatively marking it with both // GTF_CALL and GTF_GLOB_REF side-effects even though it may be more than strictly // necessary. The conservative side-effects are unlikely to have negative impact // on code quality in this case. keepalive->gtFlags |= (GTF_CALL | GTF_GLOB_REF); return keepalive; } inline GenTreeCast* Compiler::gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType) { GenTreeCast* cast = new (this, GT_CAST) GenTreeCast(typ, op1, fromUnsigned, castType); return cast; } inline GenTreeCast* Compiler::gtNewCastNodeL(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType) { /* Some casts get transformed into 'GT_CALL' or 'GT_IND' nodes */ assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_CAST]); assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_IND]); /* Make a big node first and then change it to be GT_CAST */ GenTreeCast* cast = new (this, LargeOpOpcode()) GenTreeCast(typ, op1, fromUnsigned, castType DEBUGARG(/*largeNode*/ true)); return cast; } inline GenTreeIndir* Compiler::gtNewMethodTableLookup(GenTree* object) { GenTreeIndir* result = gtNewIndir(TYP_I_IMPL, object); result->gtFlags |= GTF_IND_INVARIANT; return result; } /*****************************************************************************/ /*****************************************************************************/ inline void GenTree::SetOperRaw(genTreeOps oper) { // Please do not do anything here other than assign to gtOper (debug-only // code is OK, but should be kept to a minimum). RecordOperBashing(OperGet(), oper); // nop unless NODEBASH_STATS is enabled // Bashing to MultiOp nodes is not currently supported. assert(!OperIsMultiOp(oper)); gtOper = oper; } inline void GenTree::SetOperResetFlags(genTreeOps oper) { SetOper(oper); gtFlags &= GTF_NODE_MASK; } inline void GenTree::ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate) { assert(!OperIsConst(oper)); // use BashToConst() instead GenTreeFlags mask = GTF_COMMON_MASK; if (this->OperIsIndirOrArrLength() && OperIsIndirOrArrLength(oper)) { mask |= GTF_IND_NONFAULTING; } SetOper(oper, vnUpdate); gtFlags &= mask; } inline void GenTree::ChangeOperUnchecked(genTreeOps oper) { GenTreeFlags mask = GTF_COMMON_MASK; if (this->OperIsIndirOrArrLength() && OperIsIndirOrArrLength(oper)) { mask |= GTF_IND_NONFAULTING; } SetOperRaw(oper); // Trust the caller and don't use SetOper() gtFlags &= mask; } //------------------------------------------------------------------------ // BashToConst: Bash the node to a constant one. // // The function will infer the node's new oper from the type: GT_CNS_INT // or GT_CNS_LNG for integers and GC types, GT_CNS_DBL for floats/doubles. 
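// For example, BashToConst(static_cast<int64_t>(42)) produces a TYP_LONG constant: a GT_CNS_LNG node on 32-bit targets and a GT_CNS_INT node on 64-bit ones (see the GT_CNS_NATIVELONG selection below).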
// // The type is inferred from "value"'s type ("T") unless an explicit // one is provided via the second argument, in which case it is checked // for compatibility with "value". So, e. g., "BashToConst(0)" will bash // to GT_CNS_INT, type TYP_INT, "BashToConst(0, TYP_REF)" will bash to the // canonical "null" node, but "BashToConst(0.0, TYP_INT)" will assert. // // Arguments: // value - Value which the bashed constant will have // type - Type the bashed node will have // template <typename T> void GenTree::BashToConst(T value, var_types type /* = TYP_UNDEF */) { static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, long long>::value || std::is_same<T, float>::value || std::is_same<T, double>::value)); static_assert_no_msg(sizeof(int64_t) == sizeof(long long)); var_types typeOfValue = TYP_UNDEF; if (std::is_floating_point<T>::value) { assert((type == TYP_UNDEF) || varTypeIsFloating(type)); typeOfValue = std::is_same<T, float>::value ? TYP_FLOAT : TYP_DOUBLE; } else { assert((type == TYP_UNDEF) || varTypeIsIntegral(type) || varTypeIsGC(type)); typeOfValue = std::is_same<T, int32_t>::value ? TYP_INT : TYP_LONG; } if (type == TYP_UNDEF) { type = typeOfValue; } assert(type == genActualType(type)); genTreeOps oper = GT_NONE; if (varTypeIsFloating(type)) { oper = GT_CNS_DBL; } else { oper = (type == TYP_LONG) ? GT_CNS_NATIVELONG : GT_CNS_INT; } SetOperResetFlags(oper); gtType = type; switch (oper) { case GT_CNS_INT: #if !defined(TARGET_64BIT) assert(type != TYP_LONG); #endif assert(varTypeIsIntegral(type) || varTypeIsGC(type)); if (genTypeSize(type) <= genTypeSize(TYP_INT)) { assert(FitsIn<int32_t>(value)); } AsIntCon()->SetIconValue(static_cast<ssize_t>(value)); AsIntCon()->gtFieldSeq = FieldSeqStore::NotAField(); break; #if !defined(TARGET_64BIT) case GT_CNS_LNG: assert(type == TYP_LONG); AsLngCon()->SetLngValue(static_cast<int64_t>(value)); break; #endif case GT_CNS_DBL: assert(varTypeIsFloating(type)); AsDblCon()->gtDconVal = static_cast<double>(value); break; default: unreached(); } } //------------------------------------------------------------------------ // BashToZeroConst: Bash the node to a constant representing "zero" of "type". // // Arguments: // type - Type the bashed node will have, currently only integers, // GC types and floating point types are supported. // inline void GenTree::BashToZeroConst(var_types type) { if (varTypeIsFloating(type)) { BashToConst(0.0, type); } else { assert(varTypeIsIntegral(type) || varTypeIsGC(type)); // "genActualType" so that we do not create CNS_INT(small type). BashToConst(0, genActualType(type)); } } /***************************************************************************** * * Returns true if the node is of the "ovf" variety, for example, add.ovf.i1. * + gtOverflow() can only be called for valid operators (that is, we know it is one * of the operators which may have GTF_OVERFLOW set). * + gtOverflowEx() is more expensive, and should be called only if gtOper may be * an operator for which GTF_OVERFLOW is invalid. 
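 * For example, for a GT_ADD node gtOverflow() answers whether the node is an overflow-checking add (add.ovf); for a node whose oper may not support overflow at all, call gtOverflowEx(), which first checks OperMayOverflow().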
*/ inline bool GenTree::gtOverflow() const { assert(OperMayOverflow()); if ((gtFlags & GTF_OVERFLOW) != 0) { assert(varTypeIsIntegral(TypeGet())); return true; } else { return false; } } inline bool GenTree::gtOverflowEx() const { return OperMayOverflow() && gtOverflow(); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX LclVarsInfo XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ inline bool Compiler::lvaHaveManyLocals() const { return (lvaCount >= (unsigned)JitConfig.JitMaxLocalsToTrack()); } /***************************************************************************** * * Allocate a temporary variable or a set of temp variables. */ inline unsigned Compiler::lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason)) { if (compIsForInlining()) { // Grab the temp using Inliner's Compiler instance. Compiler* pComp = impInlineInfo->InlinerCompiler; // The Compiler instance for the caller (i.e. the inliner) if (pComp->lvaHaveManyLocals()) { // Don't create more LclVars when inlining compInlineResult->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS); } unsigned tmpNum = pComp->lvaGrabTemp(shortLifetime DEBUGARG(reason)); lvaTable = pComp->lvaTable; lvaCount = pComp->lvaCount; lvaTableCnt = pComp->lvaTableCnt; return tmpNum; } // You cannot allocate more space after frame layout! noway_assert(lvaDoneFrameLayout < Compiler::TENTATIVE_FRAME_LAYOUT); /* Check if the lvaTable has to be grown */ if (lvaCount + 1 > lvaTableCnt) { unsigned newLvaTableCnt = lvaCount + (lvaCount / 2) + 1; // Check for overflow if (newLvaTableCnt <= lvaCount) { IMPL_LIMITATION("too many locals"); } LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt); memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable)); memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable)); for (unsigned i = lvaCount; i < newLvaTableCnt; i++) { new (&newLvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. } #ifdef DEBUG // Fill the old table with junk so we can detect unintended use. memset(lvaTable, JitConfig.JitDefaultFill(), lvaCount * sizeof(*lvaTable)); #endif lvaTableCnt = newLvaTableCnt; lvaTable = newLvaTable; } const unsigned tempNum = lvaCount; lvaCount++; // Initialize lvType, lvIsTemp and lvOnFrame lvaTable[tempNum].lvType = TYP_UNDEF; lvaTable[tempNum].lvIsTemp = shortLifetime; lvaTable[tempNum].lvOnFrame = true; // If we've started normal ref counting, bump the ref count of this // local, as we no longer do any incremental counting, and we presume // this new local will be referenced. if (lvaLocalVarRefCounted()) { if (opts.OptimizationDisabled()) { lvaTable[tempNum].lvImplicitlyReferenced = 1; } else { lvaTable[tempNum].setLvRefCnt(1); lvaTable[tempNum].setLvRefCntWtd(BB_UNITY_WEIGHT); } } #ifdef DEBUG lvaTable[tempNum].lvReason = reason; if (verbose) { printf("\nlvaGrabTemp returning %d (", tempNum); gtDispLclVar(tempNum, false); printf(")%s called for %s.\n", shortLifetime ? "" : " (a long lifetime temp)", reason); } #endif // DEBUG return tempNum; } inline unsigned Compiler::lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason)) { if (compIsForInlining()) { // Grab the temps using Inliner's Compiler instance. 
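// (The inlinee compiler shares the inliner's local variable table, so the cached lvaTable pointer and counts are refreshed below after the grab.)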
unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTemps(cnt DEBUGARG(reason)); lvaTable = impInlineInfo->InlinerCompiler->lvaTable; lvaCount = impInlineInfo->InlinerCompiler->lvaCount; lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; return tmpNum; } #ifdef DEBUG if (verbose) { printf("\nlvaGrabTemps(%d) returning %d..%d (long lifetime temps) called for %s", cnt, lvaCount, lvaCount + cnt - 1, reason); } #endif // Could handle this... assert(!lvaLocalVarRefCounted()); // You cannot allocate more space after frame layout! noway_assert(lvaDoneFrameLayout < Compiler::TENTATIVE_FRAME_LAYOUT); /* Check if the lvaTable has to be grown */ if (lvaCount + cnt > lvaTableCnt) { unsigned newLvaTableCnt = lvaCount + max(lvaCount / 2 + 1, cnt); // Check for overflow if (newLvaTableCnt <= lvaCount) { IMPL_LIMITATION("too many locals"); } LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt); memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable)); memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable)); for (unsigned i = lvaCount; i < newLvaTableCnt; i++) { new (&newLvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. } #ifdef DEBUG // Fill the old table with junk so we can detect unintended use. memset(lvaTable, JitConfig.JitDefaultFill(), lvaCount * sizeof(*lvaTable)); #endif lvaTableCnt = newLvaTableCnt; lvaTable = newLvaTable; } unsigned tempNum = lvaCount; while (cnt--) { lvaTable[lvaCount].lvType = TYP_UNDEF; // Initialize lvType, lvIsTemp and lvOnFrame lvaTable[lvaCount].lvIsTemp = false; lvaTable[lvaCount].lvOnFrame = true; lvaCount++; } return tempNum; } /***************************************************************************** * * Allocate a temporary variable which is implicitly used by code-gen. * There will be no explicit references to the temp, and so it needs to * be forced to be kept alive, and not be optimized away. */ inline unsigned Compiler::lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason)) { if (compIsForInlining()) { // Grab the temp using Inliner's Compiler instance. unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTempWithImplicitUse(shortLifetime DEBUGARG(reason)); lvaTable = impInlineInfo->InlinerCompiler->lvaTable; lvaCount = impInlineInfo->InlinerCompiler->lvaCount; lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; return tmpNum; } unsigned lclNum = lvaGrabTemp(shortLifetime DEBUGARG(reason)); LclVarDsc* varDsc = lvaGetDesc(lclNum); // Note the implicit use varDsc->lvImplicitlyReferenced = 1; return lclNum; } /***************************************************************************** * * Increment the ref counts for a local variable */ inline void LclVarDsc::incRefCnts(weight_t weight, Compiler* comp, RefCountState state, bool propagate) { // In minopts and debug codegen, we don't maintain normal ref counts. if ((state == RCS_NORMAL) && !comp->PreciseRefCountsRequired()) { // At a minimum, note that there is at least one reference. lvImplicitlyReferenced = 1; return; } Compiler::lvaPromotionType promotionType = DUMMY_INIT(Compiler::PROMOTION_TYPE_NONE); if (varTypeIsStruct(lvType)) { promotionType = comp->lvaGetPromotionType(this); } // // Increment counts on the local itself. 
// if ((lvType != TYP_STRUCT) || (promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT)) { // We increment ref counts of this local for primitive types, including structs that have been retyped as their // only field, as well as for structs whose fields are not independently promoted. // // Increment lvRefCnt // int newRefCnt = lvRefCnt(state) + 1; if (newRefCnt == (unsigned short)newRefCnt) // lvRefCnt is an "unsigned short". Don't overflow it. { setLvRefCnt((unsigned short)newRefCnt, state); } // // Increment lvRefCntWtd // if (weight != 0) { // We double the weight of internal temps bool doubleWeight = lvIsTemp; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // and, for the time being, implicit byref params doubleWeight |= lvIsImplicitByRef; #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) if (doubleWeight && (weight * 2 > weight)) { weight *= 2; } weight_t newWeight = lvRefCntWtd(state) + weight; assert(newWeight >= lvRefCntWtd(state)); setLvRefCntWtd(newWeight, state); } } if (varTypeIsStruct(lvType) && propagate) { // For promoted struct locals, increment lvRefCnt on its field locals as well. if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT || promotionType == Compiler::PROMOTION_TYPE_DEPENDENT) { for (unsigned i = lvFieldLclStart; i < lvFieldLclStart + lvFieldCnt; ++i) { comp->lvaTable[i].incRefCnts(weight, comp, state, false); // Don't propagate } } } if (lvIsStructField && propagate) { // Depending on the promotion type, increment the ref count for the parent struct as well. promotionType = comp->lvaGetParentPromotionType(this); LclVarDsc* parentvarDsc = comp->lvaGetDesc(lvParentLcl); assert(!parentvarDsc->lvRegStruct); if (promotionType == Compiler::PROMOTION_TYPE_DEPENDENT) { parentvarDsc->incRefCnts(weight, comp, state, false); // Don't propagate } } #ifdef DEBUG if (comp->verbose) { printf("New refCnts for V%02u: refCnt = %2u, refCntWtd = %s\n", comp->lvaGetLclNum(this), lvRefCnt(state), refCntWtd2str(lvRefCntWtd(state))); } #endif } /***************************************************************************** * * The following returns the mask of all tracked locals * referenced in a statement. */ inline VARSET_VALRET_TP Compiler::lvaStmtLclMask(Statement* stmt) { VARSET_TP lclMask(VarSetOps::MakeEmpty(this)); assert(fgStmtListThreaded); for (GenTree* const tree : stmt->TreeList()) { if (tree->gtOper != GT_LCL_VAR) { continue; } const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon()); if (!varDsc->lvTracked) { continue; } VarSetOps::UnionD(this, lclMask, VarSetOps::MakeSingleton(this, varDsc->lvVarIndex)); } return lclMask; } /***************************************************************************** Is this a synchronized instance method? If so, we will need to report "this" in the GC information, so that the EE can release the object lock in case of an exception. We also need to report "this" and keep it alive for all shared generic code that gets the actual generic context from the "this" pointer and has exception handlers. For example, if List<T>::m() is shared between T = object and T = string, then inside m() an exception handler "catch E<T>" needs to be able to fetch the 'this' pointer to find out what 'T' is in order to tell if we should catch the exception or not. 
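 * In short, the function below returns true when 'this' must be kept alive and reported to the EE, and returns false immediately for static methods or when the first argument is not TYP_REF.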
 */

inline bool Compiler::lvaKeepAliveAndReportThis()
{
    if (info.compIsStatic || lvaTable[0].TypeGet() != TYP_REF)
    {
        return false;
    }

    const bool genericsContextIsThis = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0;

#ifdef JIT32_GCENCODER

    if (info.compFlags & CORINFO_FLG_SYNCH)
        return true;

    if (genericsContextIsThis)
    {
        // TODO: Check if any of the exception clauses are
        // typed using a generic type. Else, we do not need to report this.
        if (info.compXcptnsCount > 0)
            return true;

        if (opts.compDbgCode)
            return true;

        if (lvaGenericsContextInUse)
        {
            JITDUMP("Reporting this as generic context\n");
            return true;
        }
    }
#else // !JIT32_GCENCODER
    // If the generics context is the this pointer we need to report it if either
    // the VM requires us to keep the generics context alive or it is used in a look-up.
    // We keep it alive in the lookup scenario, even when the VM didn't ask us to,
    // because collectible types need the generics context when gc-ing.
    //
    // Methods that can inspire OSR methods must always report context as live
    //
    if (genericsContextIsThis)
    {
        const bool mustKeep      = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
        const bool hasPatchpoint = doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints();

        if (lvaGenericsContextInUse || mustKeep || hasPatchpoint)
        {
            JITDUMP("Reporting this as generic context: %s\n",
                    mustKeep ? "must keep" : (hasPatchpoint ? "patchpoints" : "referenced"));
            return true;
        }
    }
#endif

    return false;
}

/*****************************************************************************
  Similar to lvaKeepAliveAndReportThis
 */

inline bool Compiler::lvaReportParamTypeArg()
{
    if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
    {
        assert(info.compTypeCtxtArg != -1);

        // If the VM requires us to keep the generics context alive and report it (for example, if any catch
        // clause catches a type that uses a generic parameter of this method) this flag will be set.
        if (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE)
        {
            return true;
        }

        // Otherwise, if an exact type parameter is needed in the body, report the generics context.
        // We do this because collectible types need the generics context when gc-ing.
        if (lvaGenericsContextInUse)
        {
            return true;
        }

        // Methods that have patchpoints always report context as live
        //
        if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints())
        {
            return true;
        }
    }

    // Otherwise, we don't need to report it -- the generics context parameter is unused.
    return false;
}

//*****************************************************************************

inline int Compiler::lvaCachedGenericContextArgOffset()
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);

    return lvaCachedGenericContextArgOffs;
}

//------------------------------------------------------------------------
// lvaFrameAddress: Determine the stack frame offset of the given variable,
// and how to generate an address to that stack frame.
//
// Arguments:
//    varNum         - The variable to inquire about. Positive for user variables
//                     or arguments, negative for spill-temporaries.
//    mustBeFPBased  - [TARGET_ARM only] True if the base register must be FP.
//                     After FINAL_FRAME_LAYOUT, if false, the SP base register is required.
//    pBaseReg       - [TARGET_ARM only] Out arg. *pBaseReg is set to the base
//                     register to use.
//    addrModeOffset - [TARGET_ARM only] The mode offset within the variable that we need to address.
// For example, for a large struct local, and a struct field reference, this will be the offset // of the field. Thus, for V02 + 0x28, if V02 itself is at offset SP + 0x10 // then addrModeOffset is what gets added beyond that, here 0x28. // isFloatUsage - [TARGET_ARM only] True if the instruction being generated is a floating // point instruction. This requires using floating-point offset restrictions. // Note that a variable can be non-float, e.g., struct, but accessed as a // float local field. // pFPbased - [non-TARGET_ARM] Out arg. Set *FPbased to true if the // variable is addressed off of FP, false if it's addressed // off of SP. // // Return Value: // Returns the variable offset from the given base register. // inline #ifdef TARGET_ARM int Compiler::lvaFrameAddress( int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage) #else int Compiler::lvaFrameAddress(int varNum, bool* pFPbased) #endif { assert(lvaDoneFrameLayout != NO_FRAME_LAYOUT); int varOffset; bool FPbased; bool fConservative = false; if (varNum >= 0) { LclVarDsc* varDsc = lvaGetDesc(varNum); bool isPrespilledArg = false; #if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) isPrespilledArg = varDsc->lvIsParam && compIsProfilerHookNeeded() && lvaIsPreSpilled(varNum, codeGen->regSet.rsMaskPreSpillRegs(false)); #endif // If we have finished with register allocation, and this isn't a stack-based local, // check that this has a valid stack location. if (lvaDoneFrameLayout > REGALLOC_FRAME_LAYOUT && !varDsc->lvOnFrame) { #ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI // On amd64, every param has a stack location, except on Unix-like systems. assert(varDsc->lvIsParam); #endif // UNIX_AMD64_ABI #else // !TARGET_AMD64 // For other targets, a stack parameter that is enregistered or prespilled // for profiling on ARM will have a stack location. assert((varDsc->lvIsParam && !varDsc->lvIsRegArg) || isPrespilledArg); #endif // !TARGET_AMD64 } FPbased = varDsc->lvFramePointerBased; #ifdef DEBUG #if FEATURE_FIXED_OUT_ARGS if ((unsigned)varNum == lvaOutgoingArgSpaceVar) { assert(FPbased == false); } else #endif { #if DOUBLE_ALIGN assert(FPbased == (isFramePointerUsed() || (genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg))); #else #ifdef TARGET_X86 assert(FPbased == isFramePointerUsed()); #endif #endif } #endif // DEBUG varOffset = varDsc->GetStackOffset(); } else // Its a spill-temp { FPbased = isFramePointerUsed(); if (lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT) { TempDsc* tmpDsc = codeGen->regSet.tmpFindNum(varNum); // The temp might be in use, since this might be during code generation. 
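            // If the temp isn't on the free list, fall back to searching the in-use list.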
if (tmpDsc == nullptr) { tmpDsc = codeGen->regSet.tmpFindNum(varNum, RegSet::TEMP_USAGE_USED); } assert(tmpDsc != nullptr); varOffset = tmpDsc->tdTempOffs(); } else { // This value is an estimate until we calculate the // offset after the final frame layout // --------------------------------------------------- // : : // +-------------------------+ base --+ // | LR, ++N for ARM | | frameBaseOffset (= N) // +-------------------------+ | // | R11, ++N for ARM | <---FP | // +-------------------------+ --+ // | compCalleeRegsPushed - N| | lclFrameOffset // +-------------------------+ --+ // | lclVars | | // +-------------------------+ | // | tmp[MAX_SPILL_TEMP] | | // | tmp[1] | | // | tmp[0] | | compLclFrameSize // +-------------------------+ | // | outgoingArgSpaceSize | | // +-------------------------+ --+ // | | <---SP // : : // --------------------------------------------------- fConservative = true; if (!FPbased) { // Worst case stack based offset. CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_FIXED_OUT_ARGS int outGoingArgSpaceSize = lvaOutgoingArgSpaceSize; #else int outGoingArgSpaceSize = 0; #endif varOffset = outGoingArgSpaceSize + max(-varNum * TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); } else { // Worst case FP based offset. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta(); #else varOffset = -(codeGen->genTotalFrameSize()); #endif } } } #ifdef TARGET_ARM if (FPbased) { if (mustBeFPBased) { *pBaseReg = REG_FPBASE; } // Change the Frame Pointer (R11)-based addressing to the SP-based addressing when possible because // it generates smaller code on ARM. See frame picture above for the math. else { // If it is the final frame layout phase, we don't have a choice, we should stick // to either FP based or SP based that we decided in the earlier phase. Because // we have already selected the instruction. MinOpts will always reserve R10, so // for MinOpts always use SP-based offsets, using R10 as necessary, for simplicity. int spVarOffset = fConservative ? compLclFrameSize : varOffset + codeGen->genSPtoFPdelta(); int actualSPOffset = spVarOffset + addrModeOffset; int actualFPOffset = varOffset + addrModeOffset; int encodingLimitUpper = isFloatUsage ? 0x3FC : 0xFFF; int encodingLimitLower = isFloatUsage ? -0x3FC : -0xFF; // Use SP-based encoding. During encoding, we'll pick the best encoding for the actual offset we have. if (opts.MinOpts() || (actualSPOffset <= encodingLimitUpper)) { varOffset = spVarOffset; *pBaseReg = compLocallocUsed ? REG_SAVED_LOCALLOC_SP : REG_SPBASE; } // Use Frame Pointer (R11)-based encoding. else if ((encodingLimitLower <= actualFPOffset) && (actualFPOffset <= encodingLimitUpper)) { *pBaseReg = REG_FPBASE; } // Otherwise, use SP-based encoding. This is either (1) a small positive offset using a single movw, // (2) a large offset using movw/movt. In either case, we must have already reserved // the "reserved register", which will get used during encoding. else { varOffset = spVarOffset; *pBaseReg = compLocallocUsed ? 
REG_SAVED_LOCALLOC_SP : REG_SPBASE; } } } else { *pBaseReg = REG_SPBASE; } #else *pFPbased = FPbased; #endif return varOffset; } inline bool Compiler::lvaIsParameter(unsigned varNum) { const LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->lvIsParam; } inline bool Compiler::lvaIsRegArgument(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->lvIsRegArg; } inline bool Compiler::lvaIsOriginalThisArg(unsigned varNum) { assert(varNum < lvaCount); bool isOriginalThisArg = (varNum == info.compThisArg) && (info.compIsStatic == false); #ifdef DEBUG if (isOriginalThisArg) { LclVarDsc* varDsc = lvaGetDesc(varNum); // Should never write to or take the address of the original 'this' arg CLANG_FORMAT_COMMENT_ANCHOR; #ifndef JIT32_GCENCODER // With the general encoder/decoder, when the original 'this' arg is needed as a generics context param, we // copy to a new local, and mark the original as DoNotEnregister, to // ensure that it is stack-allocated. It should not be the case that the original one can be modified -- it // should not be written to, or address-exposed. assert(!varDsc->lvHasILStoreOp && (!varDsc->IsAddressExposed() || ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0))); #else assert(!varDsc->lvHasILStoreOp && !varDsc->IsAddressExposed()); #endif } #endif return isOriginalThisArg; } inline bool Compiler::lvaIsOriginalThisReadOnly() { return lvaArg0Var == info.compThisArg; } /***************************************************************************** * * The following is used to detect the cases where the same local variable# * is used both as a long/double value and a 32-bit value and/or both as an * integer/address and a float value. */ /* static */ inline unsigned Compiler::lvaTypeRefMask(var_types type) { const static BYTE lvaTypeRefMasks[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) howUsed, #include "typelist.h" #undef DEF_TP }; assert((unsigned)type < sizeof(lvaTypeRefMasks)); assert(lvaTypeRefMasks[type] != 0); return lvaTypeRefMasks[type]; } /***************************************************************************** * * The following is used to detect the cases where the same local variable# * is used both as a long/double value and a 32-bit value and/or both as an * integer/address and a float value. */ inline var_types Compiler::lvaGetActualType(unsigned lclNum) { return genActualType(lvaGetRealType(lclNum)); } inline var_types Compiler::lvaGetRealType(unsigned lclNum) { return lvaTable[lclNum].TypeGet(); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX Importer XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ inline unsigned Compiler::compMapILargNum(unsigned ILargNum) { assert(ILargNum < info.compILargsCount); // Note that this works because if compRetBuffArg/compTypeCtxtArg/lvVarargsHandleArg are not present // they will be BAD_VAR_NUM (MAX_UINT), which is larger than any variable number. if (ILargNum >= info.compRetBuffArg) { ILargNum++; assert(ILargNum < info.compLocalsCount); // compLocals count already adjusted. } if (ILargNum >= (unsigned)info.compTypeCtxtArg) { ILargNum++; assert(ILargNum < info.compLocalsCount); // compLocals count already adjusted. 
} if (ILargNum >= (unsigned)lvaVarargsHandleArg) { ILargNum++; assert(ILargNum < info.compLocalsCount); // compLocals count already adjusted. } assert(ILargNum < info.compArgsCount); return (ILargNum); } //------------------------------------------------------------------------ // Compiler::mangleVarArgsType: Retype float types to their corresponding // : int/long types. // // Notes: // // The mangling of types will only occur for incoming vararg fixed arguments // on windows arm|64 or on armel (softFP). // // NO-OP for all other cases. // inline var_types Compiler::mangleVarArgsType(var_types type) { #if defined(TARGET_ARMARCH) if (opts.compUseSoftFP || (TargetOS::IsWindows && info.compIsVarArgs)) { switch (type) { case TYP_FLOAT: return TYP_INT; case TYP_DOUBLE: return TYP_LONG; default: break; } } #endif // defined(TARGET_ARMARCH) return type; } // For CORECLR there is no vararg on System V systems. inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg) { assert(compFeatureVarArg()); #ifdef TARGET_AMD64 switch (floatReg) { case REG_XMM0: return REG_RCX; case REG_XMM1: return REG_RDX; case REG_XMM2: return REG_R8; case REG_XMM3: return REG_R9; default: unreached(); } #else // !TARGET_AMD64 // How will float args be passed for RyuJIT/x86? NYI("getCallArgIntRegister for RyuJIT/x86"); return REG_NA; #endif // !TARGET_AMD64 } inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg) { assert(compFeatureVarArg()); #ifdef TARGET_AMD64 switch (intReg) { case REG_RCX: return REG_XMM0; case REG_RDX: return REG_XMM1; case REG_R8: return REG_XMM2; case REG_R9: return REG_XMM3; default: unreached(); } #else // !TARGET_AMD64 // How will float args be passed for RyuJIT/x86? NYI("getCallArgFloatRegister for RyuJIT/x86"); return REG_NA; #endif // !TARGET_AMD64 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX Register Allocator XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ inline bool rpCanAsgOperWithoutReg(GenTree* op, bool lclvar) { var_types type; switch (op->OperGet()) { case GT_CNS_LNG: case GT_CNS_INT: return true; case GT_LCL_VAR: type = genActualType(op->TypeGet()); if (lclvar && ((type == TYP_INT) || (type == TYP_REF) || (type == TYP_BYREF))) { return true; } break; default: break; } return false; } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ inline bool Compiler::compCanEncodePtrArgCntMax() { #ifdef JIT32_GCENCODER // DDB 204533: // The GC encoding for fully interruptible methods does not // support more than 1023 pushed arguments, so we have to // use a partially interruptible GC info/encoding. // return (fgPtrArgCntMax < MAX_PTRARG_OFS); #else // JIT32_GCENCODER return true; #endif } /***************************************************************************** * * Call the given function pointer for all nodes in the tree. 
The 'visitor' * fn should return one of the following values: * * WALK_ABORT stop walking and return immediately * WALK_CONTINUE continue walking * WALK_SKIP_SUBTREES don't walk any subtrees of the node just visited * * computeStack - true if we want to make stack visible to callback function */ inline Compiler::fgWalkResult Compiler::fgWalkTreePre( GenTree** pTree, fgWalkPreFn* visitor, void* callBackData, bool lclVarsOnly, bool computeStack) { fgWalkData walkData; walkData.compiler = this; walkData.wtprVisitorFn = visitor; walkData.pCallbackData = callBackData; walkData.parent = nullptr; walkData.wtprLclsOnly = lclVarsOnly; #ifdef DEBUG walkData.printModified = false; #endif fgWalkResult result; if (lclVarsOnly && computeStack) { GenericTreeWalker<true, true, false, true, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else if (lclVarsOnly) { GenericTreeWalker<false, true, false, true, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else if (computeStack) { GenericTreeWalker<true, true, false, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else { GenericTreeWalker<false, true, false, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } #ifdef DEBUG if (verbose && walkData.printModified) { gtDispTree(*pTree); } #endif return result; } /***************************************************************************** * * Same as above, except the tree walk is performed in a depth-first fashion, * The 'visitor' fn should return one of the following values: * * WALK_ABORT stop walking and return immediately * WALK_CONTINUE continue walking * * computeStack - true if we want to make stack visible to callback function */ inline Compiler::fgWalkResult Compiler::fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* callBackData, bool computeStack) { fgWalkData walkData; walkData.compiler = this; walkData.wtpoVisitorFn = visitor; walkData.pCallbackData = callBackData; walkData.parent = nullptr; fgWalkResult result; if (computeStack) { GenericTreeWalker<true, false, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else { GenericTreeWalker<false, false, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } assert(result == WALK_CONTINUE || result == WALK_ABORT); return result; } /***************************************************************************** * * Call the given function pointer for all nodes in the tree. 
The 'visitor' * fn should return one of the following values: * * WALK_ABORT stop walking and return immediately * WALK_CONTINUE continue walking * WALK_SKIP_SUBTREES don't walk any subtrees of the node just visited */ inline Compiler::fgWalkResult Compiler::fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPreFn* postVisitor, void* callBackData) { fgWalkData walkData; walkData.compiler = this; walkData.wtprVisitorFn = preVisitor; walkData.wtpoVisitorFn = postVisitor; walkData.pCallbackData = callBackData; walkData.parent = nullptr; walkData.wtprLclsOnly = false; #ifdef DEBUG walkData.printModified = false; #endif fgWalkResult result; assert(preVisitor || postVisitor); if (preVisitor && postVisitor) { GenericTreeWalker<true, true, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else if (preVisitor) { GenericTreeWalker<true, true, false, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else { GenericTreeWalker<true, false, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } #ifdef DEBUG if (verbose && walkData.printModified) { gtDispTree(*pTree); } #endif return result; } /***************************************************************************** * * Has this block been added to throw an inlined exception * Returns true if the block was added to throw one of: * range-check exception * argument exception (used by feature SIMD) * argument range-check exception (used by feature SIMD) * divide by zero exception (Not used on X86/X64) * overflow exception */ inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) { if (!fgIsCodeAdded()) { return false; } if (!(block->bbFlags & BBF_INTERNAL) || block->bbJumpKind != BBJ_THROW) { return false; } if (!block->IsLIR() && (block->lastStmt() == nullptr)) { return false; } // Special check blocks will always end in a throw helper call. // GenTree* const call = block->lastNode(); if ((call == nullptr) || (call->gtOper != GT_CALL)) { return false; } if (!((call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWDIVZERO)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW_ARGUMENTEXCEPTION)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_OVERFLOW)))) { return false; } // We can get to this point for blocks that we didn't create as throw helper blocks // under stress, with implausible flow graph optimizations. So, walk the fgAddCodeList // for the final determination. for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext) { if (block == add->acdDstBlk) { return add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW || add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN; } } // We couldn't find it in the fgAddCodeList return false; } #if !FEATURE_FIXED_OUT_ARGS /***************************************************************************** * * Return the stackLevel of the inserted block that throws exception * (by calling the EE helper). */ inline unsigned Compiler::fgThrowHlpBlkStkLevel(BasicBlock* block) { for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext) { if (block == add->acdDstBlk) { // Compute assert cond separately as assert macro cannot have conditional compilation directives. 
bool cond = (add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW || add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN); assert(cond); // TODO: bbTgtStkDepth is DEBUG-only. // Should we use it regularly and avoid this search. assert(block->bbTgtStkDepth == add->acdStkLvl); return add->acdStkLvl; } } noway_assert(!"fgThrowHlpBlkStkLevel should only be called if fgIsThrowHlpBlk() is true, but we can't find the " "block in the fgAddCodeList list"); /* We couldn't find the basic block: it must not have been a throw helper block */ return 0; } #endif // !FEATURE_FIXED_OUT_ARGS /* Small inline function to change a given block to a throw block. */ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) { JITDUMP("Converting " FMT_BB " to BBJ_THROW\n", block->bbNum); // Ordering of the following operations matters. // First, note if we are looking at the first block of a call always pair. const bool isCallAlwaysPair = block->isBBCallAlwaysPair(); // Scrub this block from the pred lists of any successors fgRemoveBlockAsPred(block); // Update jump kind after the scrub. block->bbJumpKind = BBJ_THROW; // Any block with a throw is rare block->bbSetRunRarely(); // If we've converted a BBJ_CALLFINALLY block to a BBJ_THROW block, // then mark the subsequent BBJ_ALWAYS block as unreferenced. // // Must do this after we update bbJumpKind of block. if (isCallAlwaysPair) { BasicBlock* leaveBlk = block->bbNext; noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); // leaveBlk is now unreachable, so scrub the pred lists. leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; leaveBlk->bbPreds = nullptr; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // This function (fgConvertBBToThrowBB) can be called before the predecessor lists are created (e.g., in // fgMorph). The fgClearFinallyTargetBit() function to update the BBF_FINALLY_TARGET bit depends on these // predecessor lists. If there are no predecessor lists, we immediately clear all BBF_FINALLY_TARGET bits // (to allow subsequent dead code elimination to delete such blocks without asserts), and set a flag to // recompute them later, before they are required. if (fgComputePredsDone) { fgClearFinallyTargetBit(leaveBlk->bbJumpDest); } else { fgClearAllFinallyTargetBits(); fgNeedToAddFinallyTargetBits = true; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } } /***************************************************************************** * * Return true if we've added any new basic blocks. */ inline bool Compiler::fgIsCodeAdded() { return fgAddCodeModf; } /***************************************************************************** Is the offset too big? 
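   That is, is the offset from a (possibly null) object reference beyond
   compMaxUncheckedOffsetForNullObject, so that we can no longer rely on the
   hardware access fault to perform the null check and must emit an explicit one?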
*/ inline bool Compiler::fgIsBigOffset(size_t offset) { return (offset > compMaxUncheckedOffsetForNullObject); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX TempsInfo XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* static */ inline unsigned RegSet::tmpSlot(unsigned size) { noway_assert(size >= sizeof(int)); noway_assert(size <= TEMP_MAX_SIZE); assert((size % sizeof(int)) == 0); assert(size < UINT32_MAX); return size / sizeof(int) - 1; } /***************************************************************************** * * Finish allocating temps - should be called each time after a pass is made * over a function body. */ inline void RegSet::tmpEnd() { #ifdef DEBUG if (m_rsCompiler->verbose && (tmpCount > 0)) { printf("%d tmps used\n", tmpCount); } #endif // DEBUG } /***************************************************************************** * * Shuts down the temp-tracking code. Should be called once per function * compiled. */ inline void RegSet::tmpDone() { #ifdef DEBUG unsigned count; TempDsc* temp; assert(tmpAllFree()); for (temp = tmpListBeg(), count = temp ? 1 : 0; temp; temp = tmpListNxt(temp), count += temp ? 1 : 0) { assert(temp->tdLegalOffset()); } // Make sure that all the temps were released assert(count == tmpCount); assert(tmpGetCount == 0); #endif // DEBUG } #ifdef DEBUG inline bool Compiler::shouldUseVerboseTrees() { return (JitConfig.JitDumpVerboseTrees() == 1); } inline bool Compiler::shouldUseVerboseSsa() { return (JitConfig.JitDumpVerboseSsa() == 1); } //------------------------------------------------------------------------ // shouldDumpASCIITrees: Should we use only ASCII characters for tree dumps? // // Notes: // This is set to default to 1 in clrConfigValues.h inline bool Compiler::shouldDumpASCIITrees() { return (JitConfig.JitDumpASCII() == 1); } /***************************************************************************** * Should we enable JitStress mode? * 0: No stress * !=2: Vary stress. Performance will be slightly/moderately degraded * 2: Check-all stress. Performance will be REALLY horrible */ inline int getJitStressLevel() { return JitConfig.JitStress(); } #endif // DEBUG /*****************************************************************************/ /* Map a register argument number ("RegArgNum") to a register number ("RegNum"). * A RegArgNum is in this range: * [0, MAX_REG_ARG) -- for integer registers * [0, MAX_FLOAT_REG_ARG) -- for floating point registers * Note that RegArgNum's are overlapping for integer and floating-point registers, * while RegNum's are not (for ARM anyway, though for x86, it might be different). 
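 * For example, on Windows x64 integer RegArgNums 0..3 map to RCX, RDX, R8 and R9,
 * while floating-point RegArgNums 0..3 map to XMM0..XMM3: the same RegArgNum names
 * a different physical register depending on the argument's type.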
* If we have a fixed return buffer register and are given it's index * we return the fixed return buffer register */ inline regNumber genMapIntRegArgNumToRegNum(unsigned argNum) { if (hasFixedRetBuffReg() && (argNum == theFixedRetBuffArgNum())) { return theFixedRetBuffReg(); } assert(argNum < ArrLen(intArgRegs)); return intArgRegs[argNum]; } inline regNumber genMapFloatRegArgNumToRegNum(unsigned argNum) { #ifndef TARGET_X86 assert(argNum < ArrLen(fltArgRegs)); return fltArgRegs[argNum]; #else assert(!"no x86 float arg regs\n"); return REG_NA; #endif } __forceinline regNumber genMapRegArgNumToRegNum(unsigned argNum, var_types type) { if (varTypeUsesFloatArgReg(type)) { return genMapFloatRegArgNumToRegNum(argNum); } else { return genMapIntRegArgNumToRegNum(argNum); } } /*****************************************************************************/ /* Map a register argument number ("RegArgNum") to a register mask of the associated register. * Note that for floating-pointer registers, only the low register for a register pair * (for a double on ARM) is returned. */ inline regMaskTP genMapIntRegArgNumToRegMask(unsigned argNum) { assert(argNum < ArrLen(intArgMasks)); return intArgMasks[argNum]; } inline regMaskTP genMapFloatRegArgNumToRegMask(unsigned argNum) { #ifndef TARGET_X86 assert(argNum < ArrLen(fltArgMasks)); return fltArgMasks[argNum]; #else assert(!"no x86 float arg regs\n"); return RBM_NONE; #endif } __forceinline regMaskTP genMapArgNumToRegMask(unsigned argNum, var_types type) { regMaskTP result; if (varTypeUsesFloatArgReg(type)) { result = genMapFloatRegArgNumToRegMask(argNum); #ifdef TARGET_ARM if (type == TYP_DOUBLE) { assert((result & RBM_DBL_REGS) != 0); result |= (result << 1); } #endif } else { result = genMapIntRegArgNumToRegMask(argNum); } return result; } /*****************************************************************************/ /* Map a register number ("RegNum") to a register argument number ("RegArgNum") * If we have a fixed return buffer register we return theFixedRetBuffArgNum */ inline unsigned genMapIntRegNumToRegArgNum(regNumber regNum) { assert(genRegMask(regNum) & fullIntArgRegMask()); switch (regNum) { case REG_ARG_0: return 0; #if MAX_REG_ARG >= 2 case REG_ARG_1: return 1; #if MAX_REG_ARG >= 3 case REG_ARG_2: return 2; #if MAX_REG_ARG >= 4 case REG_ARG_3: return 3; #if MAX_REG_ARG >= 5 case REG_ARG_4: return 4; #if MAX_REG_ARG >= 6 case REG_ARG_5: return 5; #if MAX_REG_ARG >= 7 case REG_ARG_6: return 6; #if MAX_REG_ARG >= 8 case REG_ARG_7: return 7; #endif #endif #endif #endif #endif #endif #endif default: // Check for the Arm64 fixed return buffer argument register if (hasFixedRetBuffReg() && (regNum == theFixedRetBuffReg())) { return theFixedRetBuffArgNum(); } else { assert(!"invalid register arg register"); return BAD_VAR_NUM; } } } inline unsigned genMapFloatRegNumToRegArgNum(regNumber regNum) { assert(genRegMask(regNum) & RBM_FLTARG_REGS); #ifdef TARGET_ARM return regNum - REG_F0; #elif defined(TARGET_ARM64) return regNum - REG_V0; #elif defined(UNIX_AMD64_ABI) return regNum - REG_FLTARG_0; #else #if MAX_FLOAT_REG_ARG >= 1 switch (regNum) { case REG_FLTARG_0: return 0; #if MAX_REG_ARG >= 2 case REG_FLTARG_1: return 1; #if MAX_REG_ARG >= 3 case REG_FLTARG_2: return 2; #if MAX_REG_ARG >= 4 case REG_FLTARG_3: return 3; #if MAX_REG_ARG >= 5 case REG_FLTARG_4: return 4; #endif #endif #endif #endif default: assert(!"invalid register arg register"); return BAD_VAR_NUM; } #else assert(!"flt reg args not allowed"); return BAD_VAR_NUM; #endif #endif // !arm } 
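// For a register that is a valid argument register of the given type, this mapping
// is the inverse of genMapRegArgNumToRegNum above; as an illustration:
//
//    regNumber reg = genMapRegArgNumToRegNum(argNum, type);
//    assert(genMapRegNumToRegArgNum(reg, type) == argNum);
//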
inline unsigned genMapRegNumToRegArgNum(regNumber regNum, var_types type)
{
    if (varTypeUsesFloatArgReg(type))
    {
        return genMapFloatRegNumToRegArgNum(regNum);
    }
    else
    {
        return genMapIntRegNumToRegArgNum(regNum);
    }
}

/*****************************************************************************/
/* Return a register mask with the first 'numRegs' argument registers set.
 */

inline regMaskTP genIntAllRegArgMask(unsigned numRegs)
{
    assert(numRegs <= MAX_REG_ARG);

    regMaskTP result = RBM_NONE;
    for (unsigned i = 0; i < numRegs; i++)
    {
        result |= intArgMasks[i];
    }
    return result;
}

inline regMaskTP genFltAllRegArgMask(unsigned numRegs)
{
#ifndef TARGET_X86
    assert(numRegs <= MAX_FLOAT_REG_ARG);

    regMaskTP result = RBM_NONE;
    for (unsigned i = 0; i < numRegs; i++)
    {
        result |= fltArgMasks[i];
    }
    return result;
#else
    assert(!"no x86 float arg regs\n");
    return RBM_NONE;
#endif
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                           Liveness                                        XX
XX                      Inline functions                                     XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

//------------------------------------------------------------------------
// compUpdateLife: Update the GC masks, register masks, and report changes to
//    variables' homes, given a set of currently live variables, if anything has
//    changed since "compCurLife".
//
// Arguments:
//    newLife - the set of variables that are alive.
//
// Assumptions:
//    The set of live variables reflects only code that has already been emitted; it
//    must not take into account instructions that have not been emitted yet becoming
//    live or dead. This is required by "compChangeLife".
template <bool ForCodeGen>
inline void Compiler::compUpdateLife(VARSET_VALARG_TP newLife)
{
    if (!VarSetOps::Equal(this, compCurLife, newLife))
    {
        compChangeLife<ForCodeGen>(newLife);
    }
#ifdef DEBUG
    else
    {
        if (verbose)
        {
            printf("Liveness not changing: %s ", VarSetOps::ToString(this, compCurLife));
            dumpConvertedVarSet(this, compCurLife);
            printf("\n");
        }
    }
#endif // DEBUG
}

/*****************************************************************************
 *
 *  We stash cookies in basic blocks for the code emitter; this call retrieves
 *  the cookie associated with the given basic block.
 */

inline void* emitCodeGetCookie(BasicBlock* block)
{
    assert(block);
    return block->bbEmitCookie;
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                          Optimizer                                        XX
XX                      Inline functions                                     XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

/*****************************************************************************
 *
 *  The following resets the value assignment table
 *  used only during local assertion prop
 */

inline void Compiler::optAssertionReset(AssertionIndex limit)
{
    PREFAST_ASSUME(optAssertionCount <= optMaxAssertionCount);

    while (optAssertionCount > limit)
    {
        AssertionIndex index        = optAssertionCount;
        AssertionDsc*  curAssertion = optGetAssertion(index);
        optAssertionCount--;
        unsigned lclNum = curAssertion->op1.lcl.lclNum;
        assert(lclNum < lvaCount);
        BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);

        //
        // Find the Copy assertions
        //
        if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
            (curAssertion->op2.kind == O2K_LCLVAR_COPY))
        {
            //
            //  op2.lcl.lclNum no longer depends upon this assertion
            //
            lclNum = curAssertion->op2.lcl.lclNum;
            BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);
        }
    }

    while (optAssertionCount < limit)
    {
        AssertionIndex index        = ++optAssertionCount;
        AssertionDsc*  curAssertion = optGetAssertion(index);
        unsigned       lclNum       = curAssertion->op1.lcl.lclNum;
        BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1);

        //
        // Check for Copy assertions
        //
        if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
            (curAssertion->op2.kind == O2K_LCLVAR_COPY))
        {
            //
            //  op2.lcl.lclNum now depends upon this assertion
            //
            lclNum = curAssertion->op2.lcl.lclNum;
            BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1);
        }
    }
}

/*****************************************************************************
 *
 *  The following removes the i-th entry in the value assignment table
 *  used only during local assertion prop
 */

inline void Compiler::optAssertionRemove(AssertionIndex index)
{
    assert(index > 0);
    assert(index <= optAssertionCount);
    PREFAST_ASSUME(optAssertionCount <= optMaxAssertionCount);

    AssertionDsc* curAssertion = optGetAssertion(index);

    //  There are two cases to consider. If (index == optAssertionCount), then the last
    //  entry in the table is to be removed; that happens automatically when
    //  optAssertionCount is decremented, and we can simply clear the optAssertionDep bits.
    //  The other case is when index < optAssertionCount. Here we overwrite the
    //  index-th entry in the table with the data found at the end of the table.
    //  Since we are reordering the table, the optAssertionDep bits need to be recreated:
    //  calling optAssertionReset(0) and then optAssertionReset(newAssertionCount) will
    //  correctly update the optAssertionDep bits.
    //
    if (index == optAssertionCount)
    {
        unsigned lclNum = curAssertion->op1.lcl.lclNum;
        BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);

        //
        // Check for Copy assertions
        //
        if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
            (curAssertion->op2.kind == O2K_LCLVAR_COPY))
        {
            //
            //  op2.lcl.lclNum no longer depends upon this assertion
            //
            lclNum = curAssertion->op2.lcl.lclNum;
            BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);
        }

        optAssertionCount--;
    }
    else
    {
AssertionDsc* lastAssertion = optGetAssertion(optAssertionCount); AssertionIndex newAssertionCount = optAssertionCount - 1; optAssertionReset(0); // This make optAssertionCount equal 0 memcpy(curAssertion, // the entry to be removed lastAssertion, // last entry in the table sizeof(AssertionDsc)); optAssertionReset(newAssertionCount); } } inline void Compiler::LoopDsc::AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind) { if (lpFieldsModified == nullptr) { lpFieldsModified = new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::FieldHandleSet(comp->getAllocatorLoopHoist()); } lpFieldsModified->Set(fldHnd, fieldKind, FieldHandleSet::Overwrite); } inline void Compiler::LoopDsc::AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd) { if (lpArrayElemTypesModified == nullptr) { lpArrayElemTypesModified = new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::ClassHandleSet(comp->getAllocatorLoopHoist()); } lpArrayElemTypesModified->Set(structHnd, true, ClassHandleSet::Overwrite); } inline void Compiler::LoopDsc::VERIFY_lpIterTree() const { #ifdef DEBUG assert(lpFlags & LPFLG_ITER); // iterTree should be "lcl ASG lcl <op> const" assert(lpIterTree->OperIs(GT_ASG)); const GenTree* lhs = lpIterTree->AsOp()->gtOp1; const GenTree* rhs = lpIterTree->AsOp()->gtOp2; assert(lhs->OperGet() == GT_LCL_VAR); switch (rhs->gtOper) { case GT_ADD: case GT_SUB: case GT_MUL: case GT_RSH: case GT_LSH: break; default: assert(!"Unknown operator for loop increment"); } assert(rhs->AsOp()->gtOp1->OperGet() == GT_LCL_VAR); assert(rhs->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() == lhs->AsLclVarCommon()->GetLclNum()); assert(rhs->AsOp()->gtOp2->OperGet() == GT_CNS_INT); #endif } //----------------------------------------------------------------------------- inline unsigned Compiler::LoopDsc::lpIterVar() const { VERIFY_lpIterTree(); return lpIterTree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); } //----------------------------------------------------------------------------- inline int Compiler::LoopDsc::lpIterConst() const { VERIFY_lpIterTree(); GenTree* rhs = lpIterTree->AsOp()->gtOp2; return (int)rhs->AsOp()->gtOp2->AsIntCon()->gtIconVal; } //----------------------------------------------------------------------------- inline genTreeOps Compiler::LoopDsc::lpIterOper() const { VERIFY_lpIterTree(); GenTree* rhs = lpIterTree->AsOp()->gtOp2; return rhs->OperGet(); } inline var_types Compiler::LoopDsc::lpIterOperType() const { VERIFY_lpIterTree(); var_types type = lpIterTree->TypeGet(); assert(genActualType(type) == TYP_INT); if ((lpIterTree->gtFlags & GTF_UNSIGNED) && type == TYP_INT) { type = TYP_UINT; } return type; } inline void Compiler::LoopDsc::VERIFY_lpTestTree() const { #ifdef DEBUG assert(lpFlags & LPFLG_ITER); assert(lpTestTree); genTreeOps oper = lpTestTree->OperGet(); assert(GenTree::OperIsCompare(oper)); GenTree* iterator = nullptr; GenTree* limit = nullptr; if ((lpTestTree->AsOp()->gtOp2->gtOper == GT_LCL_VAR) && (lpTestTree->AsOp()->gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0) { iterator = lpTestTree->AsOp()->gtOp2; limit = lpTestTree->AsOp()->gtOp1; } else if ((lpTestTree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (lpTestTree->AsOp()->gtOp1->gtFlags & GTF_VAR_ITERATOR) != 0) { iterator = lpTestTree->AsOp()->gtOp1; limit = lpTestTree->AsOp()->gtOp2; } else { // one of the nodes has to be the iterator assert(false); } if (lpFlags & LPFLG_CONST_LIMIT) { assert(limit->OperIsConst()); } if (lpFlags & LPFLG_VAR_LIMIT) { assert(limit->OperGet() == GT_LCL_VAR); } if 
(lpFlags & LPFLG_ARRLEN_LIMIT) { assert(limit->OperGet() == GT_ARR_LENGTH); } #endif } //----------------------------------------------------------------------------- inline bool Compiler::LoopDsc::lpIsReversed() const { VERIFY_lpTestTree(); return ((lpTestTree->AsOp()->gtOp2->gtOper == GT_LCL_VAR) && (lpTestTree->AsOp()->gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0); } //----------------------------------------------------------------------------- inline genTreeOps Compiler::LoopDsc::lpTestOper() const { VERIFY_lpTestTree(); genTreeOps op = lpTestTree->OperGet(); return lpIsReversed() ? GenTree::SwapRelop(op) : op; } //----------------------------------------------------------------------------- inline GenTree* Compiler::LoopDsc::lpIterator() const { VERIFY_lpTestTree(); return lpIsReversed() ? lpTestTree->AsOp()->gtOp2 : lpTestTree->AsOp()->gtOp1; } //----------------------------------------------------------------------------- inline GenTree* Compiler::LoopDsc::lpLimit() const { VERIFY_lpTestTree(); return lpIsReversed() ? lpTestTree->AsOp()->gtOp1 : lpTestTree->AsOp()->gtOp2; } //----------------------------------------------------------------------------- inline int Compiler::LoopDsc::lpConstLimit() const { VERIFY_lpTestTree(); assert(lpFlags & LPFLG_CONST_LIMIT); GenTree* limit = lpLimit(); assert(limit->OperIsConst()); return (int)limit->AsIntCon()->gtIconVal; } //----------------------------------------------------------------------------- inline unsigned Compiler::LoopDsc::lpVarLimit() const { VERIFY_lpTestTree(); assert(lpFlags & LPFLG_VAR_LIMIT); GenTree* limit = lpLimit(); assert(limit->OperGet() == GT_LCL_VAR); return limit->AsLclVarCommon()->GetLclNum(); } //----------------------------------------------------------------------------- inline bool Compiler::LoopDsc::lpArrLenLimit(Compiler* comp, ArrIndex* index) const { VERIFY_lpTestTree(); assert(lpFlags & LPFLG_ARRLEN_LIMIT); GenTree* limit = lpLimit(); assert(limit->OperGet() == GT_ARR_LENGTH); // Check if we have a.length or a[i][j].length if (limit->AsArrLen()->ArrRef()->gtOper == GT_LCL_VAR) { index->arrLcl = limit->AsArrLen()->ArrRef()->AsLclVarCommon()->GetLclNum(); index->rank = 0; return true; } // We have a[i].length, extract a[i] pattern. 
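    // (The inner array reference appears as a GT_COMMA tree because of the bounds
    // check on the index, so use optReconstructArrIndex to recover the index expressions.)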
else if (limit->AsArrLen()->ArrRef()->gtOper == GT_COMMA)
    {
        return comp->optReconstructArrIndex(limit->AsArrLen()->ArrRef(), index, BAD_VAR_NUM);
    }
    return false;
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                          EEInterface                                      XX
XX                      Inline functions                                     XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

extern var_types JITtype2varType(CorInfoType type);

#include "ee_il_dll.hpp"

inline CORINFO_METHOD_HANDLE Compiler::eeFindHelper(unsigned helper)
{
    assert(helper < CORINFO_HELP_COUNT);

    /* Helpers are marked by the fact that they are odd numbers;
     * force this to be an odd number (it will be shifted back to extract) */

    return ((CORINFO_METHOD_HANDLE)((((size_t)helper) << 2) + 1));
}

inline CorInfoHelpFunc Compiler::eeGetHelperNum(CORINFO_METHOD_HANDLE method)
{
    // Helpers are marked by the fact that they are odd numbers
    if (!(((size_t)method) & 1))
    {
        return (CORINFO_HELP_UNDEF);
    }
    return ((CorInfoHelpFunc)(((size_t)method) >> 2));
}

//  TODO-Cleanup: Replace calls to IsSharedStaticHelper with new HelperCallProperties
//
inline bool Compiler::IsSharedStaticHelper(GenTree* tree)
{
    if (tree->gtOper != GT_CALL || tree->AsCall()->gtCallType != CT_HELPER)
    {
        return false;
    }

    CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd);

    bool result1 =
        // More helpers being added to IsSharedStaticHelper (that have similar behaviors but are not true
        // SharedStaticHelpers)
        helper == CORINFO_HELP_STRCNS || helper == CORINFO_HELP_BOX ||

        // helpers being added to IsSharedStaticHelper
        helper == CORINFO_HELP_GETSTATICFIELDADDR_TLS ||

        helper == CORINFO_HELP_GETGENERICS_GCSTATIC_BASE || helper == CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE ||
        helper == CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE ||
        helper == CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE ||

        helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE || helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE ||
        helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS ||
        helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS ||
        helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE ||
        helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE ||
        helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS ||
        helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS ||
#ifdef FEATURE_READYTORUN
        helper == CORINFO_HELP_READYTORUN_STATIC_BASE || helper == CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE ||
#endif
        helper == CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS;
#if 0
    // See above TODO-Cleanup
    bool result2 = s_helperCallProperties.IsPure(helper) && s_helperCallProperties.NonNullReturn(helper);
    assert (result1 == result2);
#endif
    return result1;
}

inline bool Compiler::IsGcSafePoint(GenTreeCall* call)
{
    if (!call->IsFastTailCall())
    {
        if (call->IsUnmanaged() && call->IsSuppressGCTransition())
        {
            // Both indirect and user calls can be unmanaged
            // and have a request to suppress the GC transition, so
            // the check is done prior to the separate handling of
            // indirect and user calls.
return false; } else if (call->gtCallType == CT_INDIRECT) { return true; } else if (call->gtCallType == CT_USER_FUNC) { if ((call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) == 0) { return true; } } // otherwise we have a CT_HELPER } return false; } // // Note that we want to have two special FIELD_HANDLES that will both // be considered non-Data Offset handles // // The special values that we use are FLD_GLOBAL_DS and FLD_GLOBAL_FS // inline bool jitStaticFldIsGlobAddr(CORINFO_FIELD_HANDLE fldHnd) { return (fldHnd == FLD_GLOBAL_DS || fldHnd == FLD_GLOBAL_FS); } #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(FEATURE_TRACELOGGING) inline bool Compiler::eeIsNativeMethod(CORINFO_METHOD_HANDLE method) { return ((((size_t)method) & 0x2) == 0x2); } inline CORINFO_METHOD_HANDLE Compiler::eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method) { assert((((size_t)method) & 0x3) == 0x2); return (CORINFO_METHOD_HANDLE)(((size_t)method) & ~0x3); } #endif inline CORINFO_METHOD_HANDLE Compiler::eeMarkNativeTarget(CORINFO_METHOD_HANDLE method) { assert((((size_t)method) & 0x3) == 0); if (method == nullptr) { return method; } else { return (CORINFO_METHOD_HANDLE)(((size_t)method) | 0x2); } } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX Compiler XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef DEBUG inline bool Compiler::compStressCompile(compStressArea stressArea, unsigned weightPercentage) { return false; } #endif inline ArenaAllocator* Compiler::compGetArenaAllocator() { return compArenaAllocator; } inline bool Compiler::compIsProfilerHookNeeded() { #ifdef PROFILING_SUPPORTED return compProfilerHookNeeded // IL stubs are excluded by VM and we need to do the same even running // under a complus env hook to generate profiler hooks || (opts.compJitELTHookEnabled && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)); #else // !PROFILING_SUPPORTED return false; #endif // !PROFILING_SUPPORTED } /***************************************************************************** * * Check for the special case where the object is the methods original 'this' pointer. * Note that, the original 'this' pointer is always local var 0 for non-static method, * even if we might have created the copy of 'this' pointer in lvaArg0Var. */ inline bool Compiler::impIsThis(GenTree* obj) { if (compIsForInlining()) { return impInlineInfo->InlinerCompiler->impIsThis(obj); } else { return ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR) && lvaIsOriginalThisArg(obj->AsLclVarCommon()->GetLclNum())); } } /***************************************************************************** * * Check to see if the delegate is created using "LDFTN <TOK>" or not. */ inline bool Compiler::impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr) { assert(newobjCodeAddr[0] == CEE_NEWOBJ); return (newobjCodeAddr - delegateCreateStart == 6 && // LDFTN <TOK> takes 6 bytes delegateCreateStart[0] == CEE_PREFIX1 && delegateCreateStart[1] == (CEE_LDFTN & 0xFF)); } /***************************************************************************** * * Check to see if the delegate is created using "DUP LDVIRTFTN <TOK>" or not. 
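 *  (CEE_DUP is a 1-byte opcode; CEE_LDVIRTFTN is a 2-byte opcode (CEE_PREFIX1 plus a
 *  second byte) followed by a 4-byte metadata token, so the whole sequence is 7 bytes.)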
 */
inline bool Compiler::impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr)
{
    assert(newobjCodeAddr[0] == CEE_NEWOBJ);
    return (newobjCodeAddr - delegateCreateStart == 7 && // DUP (1 byte) + LDVIRTFTN <TOK> (6 bytes) = 7 bytes
            delegateCreateStart[0] == CEE_DUP && delegateCreateStart[1] == CEE_PREFIX1 &&
            delegateCreateStart[2] == (CEE_LDVIRTFTN & 0xFF));
}

/*****************************************************************************
 *
 * Returns true if the compiler instance is created for import only (verification).
 */

inline bool Compiler::compIsForImportOnly()
{
    return opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY);
}

/*****************************************************************************
 *
 *  Returns true if the compiler instance is created for inlining.
 */

inline bool Compiler::compIsForInlining() const
{
    return (impInlineInfo != nullptr);
}

/*****************************************************************************
 *
 *  Check the inline result field in the compiler to see if inlining failed or not.
 */

inline bool Compiler::compDonotInline()
{
    if (compIsForInlining())
    {
        assert(compInlineResult != nullptr);
        return compInlineResult->IsFailure();
    }
    else
    {
        return false;
    }
}

inline bool Compiler::impIsPrimitive(CorInfoType jitType)
{
    return ((CORINFO_TYPE_BOOL <= jitType && jitType <= CORINFO_TYPE_DOUBLE) || jitType == CORINFO_TYPE_PTR);
}

/*****************************************************************************
 *
 *  Get the promotion type of a struct local.
 */

inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(const LclVarDsc* varDsc)
{
    assert(!varDsc->lvPromoted || varTypeIsPromotable(varDsc) || varDsc->lvUnusedStruct);

    if (!varDsc->lvPromoted)
    {
        // no struct promotion for this LclVar
        return PROMOTION_TYPE_NONE;
    }
    if (varDsc->lvDoNotEnregister)
    {
        // The struct is not enregistered
        return PROMOTION_TYPE_DEPENDENT;
    }
    if (!varDsc->lvIsParam)
    {
        // The struct is a register candidate
        return PROMOTION_TYPE_INDEPENDENT;
    }

    // Has struct promotion for arguments been disabled using COMPlus_JitNoStructPromotion=2?
    if (fgNoStructParamPromotion)
    {
        // The struct parameter is not enregistered
        return PROMOTION_TYPE_DEPENDENT;
    }

    // We have a parameter that could be enregistered
#if defined(TARGET_ARM)
    // TODO-Cleanup: return INDEPENDENT for arm32.
    return PROMOTION_TYPE_DEPENDENT;
#else  // !TARGET_ARM
    return PROMOTION_TYPE_INDEPENDENT;
#endif // !TARGET_ARM
}

/*****************************************************************************
 *
 *  Get the promotion type of a struct local.
 */

inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(unsigned varNum)
{
    return lvaGetPromotionType(lvaGetDesc(varNum));
}

/*****************************************************************************
 *
 *  Given a field local, get the promotion type of its parent struct local.
 */

inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(const LclVarDsc* varDsc)
{
    assert(varDsc->lvIsStructField);

    lvaPromotionType promotionType = lvaGetPromotionType(varDsc->lvParentLcl);
    assert(promotionType != PROMOTION_TYPE_NONE);
    return promotionType;
}

/*****************************************************************************
 *
 *  Given a field local, get the promotion type of its parent struct local.
*/ inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(unsigned varNum) { return lvaGetParentPromotionType(lvaGetDesc(varNum)); } /***************************************************************************** * * Return true if the local is a field local of a promoted struct of type PROMOTION_TYPE_DEPENDENT. * Return false otherwise. */ inline bool Compiler::lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc) { if (!varDsc->lvIsStructField) { return false; } lvaPromotionType promotionType = lvaGetParentPromotionType(varDsc); if (promotionType == PROMOTION_TYPE_DEPENDENT) { return true; } assert(promotionType == PROMOTION_TYPE_INDEPENDENT); return false; } //------------------------------------------------------------------------ // lvaIsGCTracked: Determine whether this var should be reported // as tracked for GC purposes. // // Arguments: // varDsc - the LclVarDsc for the var in question. // // Return Value: // Returns true if the variable should be reported as tracked in the GC info. // // Notes: // This never returns true for struct variables, even if they are tracked. // This is because struct variables are never tracked as a whole for GC purposes. // It is up to the caller to ensure that the fields of struct variables are // correctly tracked. // On Amd64, we never GC-track fields of dependently promoted structs, even // though they may be tracked for optimization purposes. // It seems that on x86 and arm, we simply don't track these // fields, though I have not verified that. I attempted to make these GC-tracked, // but there was too much logic that depends on these being untracked, so changing // this would require non-trivial effort. inline bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc) { if (varDsc->lvTracked && (varDsc->lvType == TYP_REF || varDsc->lvType == TYP_BYREF)) { // Stack parameters are always untracked w.r.t. GC reportings const bool isStackParam = varDsc->lvIsParam && !varDsc->lvIsRegArg; #ifdef TARGET_AMD64 return !isStackParam && !lvaIsFieldOfDependentlyPromotedStruct(varDsc); #else // !TARGET_AMD64 return !isStackParam; #endif // !TARGET_AMD64 } else { return false; } } /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS inline void Compiler::CLRApiCallEnter(unsigned apix) { if (pCompJitTimer != nullptr) { pCompJitTimer->CLRApiCallEnter(apix); } } inline void Compiler::CLRApiCallLeave(unsigned apix) { if (pCompJitTimer != nullptr) { pCompJitTimer->CLRApiCallLeave(apix); } } inline void Compiler::CLR_API_Enter(API_ICorJitInfo_Names ename) { CLRApiCallEnter(ename); } inline void Compiler::CLR_API_Leave(API_ICorJitInfo_Names ename) { CLRApiCallLeave(ename); } #endif // MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------------ // fgVarIsNeverZeroInitializedInProlog : Check whether the variable is never zero initialized in the prolog. 
// // Arguments: // varNum - local variable number // // Returns: // true if this is a special variable that is never zero initialized in the prolog; // false otherwise // bool Compiler::fgVarIsNeverZeroInitializedInProlog(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); bool result = varDsc->lvIsParam || lvaIsOSRLocal(varNum) || (varNum == lvaGSSecurityCookie) || (varNum == lvaInlinedPInvokeFrameVar) || (varNum == lvaStubArgumentVar) || (varNum == lvaRetAddrVar); #if FEATURE_FIXED_OUT_ARGS result = result || (varNum == lvaPInvokeFrameRegSaveVar) || (varNum == lvaOutgoingArgSpaceVar); #endif #if defined(FEATURE_EH_FUNCLETS) result = result || (varNum == lvaPSPSym); #endif return result; } //------------------------------------------------------------------------------ // fgVarNeedsExplicitZeroInit : Check whether the variable needs an explicit zero initialization. // // Arguments: // varNum - local var number // bbInALoop - true if the basic block may be in a loop // bbIsReturn - true if the basic block always returns // // Returns: // true if the var needs explicit zero-initialization in this basic block; // false otherwise // // Notes: // If the variable is not being initialized in a loop, we can avoid explicit zero initialization if // - the variable is a gc pointer, or // - the variable is a struct with gc pointer fields and either all fields are gc pointer fields // or the struct is big enough to guarantee block initialization, or // - compInitMem is set and the variable has a long lifetime or has gc fields. // In these cases we will insert zero-initialization in the prolog if necessary. bool Compiler::fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn) { LclVarDsc* varDsc = lvaGetDesc(varNum); if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { // Fields of dependently promoted structs may only be initialized in the prolog when the whole // struct is initialized in the prolog. return fgVarNeedsExplicitZeroInit(varDsc->lvParentLcl, bbInALoop, bbIsReturn); } if (bbInALoop && !bbIsReturn) { return true; } if (fgVarIsNeverZeroInitializedInProlog(varNum)) { return true; } if (varTypeIsGC(varDsc->lvType)) { return false; } if ((varDsc->lvType == TYP_STRUCT) && varDsc->HasGCPtr()) { ClassLayout* layout = varDsc->GetLayout(); if (layout->GetSlotCount() == layout->GetGCPtrCount()) { return false; } // Below conditions guarantee block initialization, which will initialize // all struct fields. If the logic for block initialization in CodeGen::genCheckUseBlockInit() // changes, these conditions need to be updated. 
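        // In byte terms, after rounding the struct size up to TARGET_POINTER_SIZE, the
        // cutoffs below are: more than 16 bytes on AMD64 (SIMD-based block init lowers
        // the threshold there), more than 32 bytes on other 64-bit targets, and more
        // than 16 bytes on 32-bit targets.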
#ifdef TARGET_64BIT
#if defined(TARGET_AMD64)
        // We can clear using aligned SIMD so the threshold is lower,
        // and clears in order which is better for auto-prefetching
        if (roundUp(varDsc->lvSize(), TARGET_POINTER_SIZE) / sizeof(int) > 4)
#else // !defined(TARGET_AMD64)
        if (roundUp(varDsc->lvSize(), TARGET_POINTER_SIZE) / sizeof(int) > 8)
#endif
#else
        if (roundUp(varDsc->lvSize(), TARGET_POINTER_SIZE) / sizeof(int) > 4)
#endif
        {
            return false;
        }
    }

    return !info.compInitMem || (varDsc->lvIsTemp && !varDsc->HasGCPtr());
}

/*****************************************************************************/

ValueNum Compiler::GetUseAsgDefVNOrTreeVN(GenTree* op)
{
    if (op->gtFlags & GTF_VAR_USEASG)
    {
        unsigned lclNum = op->AsLclVarCommon()->GetLclNum();
        unsigned ssaNum = GetSsaNumForLocalVarDef(op);
        return lvaTable[lclNum].GetPerSsaData(ssaNum)->m_vnPair.GetConservative();
    }
    else
    {
        return op->gtVNPair.GetConservative();
    }
}

/*****************************************************************************/

unsigned Compiler::GetSsaNumForLocalVarDef(GenTree* lcl)
{
    // Address-taken variables don't have SSA numbers.
    if (!lvaInSsa(lcl->AsLclVarCommon()->GetLclNum()))
    {
        return SsaConfig::RESERVED_SSA_NUM;
    }

    if (lcl->gtFlags & GTF_VAR_USEASG)
    {
        // It's a partial definition of a struct. "lcl" is both used and defined here;
        // we've chosen in this case to annotate "lcl" with the SSA number (and VN) of the use,
        // and to store the SSA number of the def in a side table.
        unsigned ssaNum;
        // In case of a remorph (fgMorph) in CSE/AssertionProp after SSA phase, there
        // wouldn't be an entry for the USEASG portion of the indir addr, return
        // reserved.
        if (!GetOpAsgnVarDefSsaNums()->Lookup(lcl, &ssaNum))
        {
            return SsaConfig::RESERVED_SSA_NUM;
        }
        return ssaNum;
    }
    else
    {
        return lcl->AsLclVarCommon()->GetSsaNum();
    }
}

inline bool Compiler::PreciseRefCountsRequired()
{
    return opts.OptimizationEnabled();
}

template <typename TVisitor>
void GenTree::VisitOperands(TVisitor visitor)
{
    switch (OperGet())
    {
        // Leaf nodes
        case GT_LCL_VAR:
        case GT_LCL_FLD:
        case GT_LCL_VAR_ADDR:
        case GT_LCL_FLD_ADDR:
        case GT_CATCH_ARG:
        case GT_LABEL:
        case GT_FTN_ADDR:
        case GT_RET_EXPR:
        case GT_CNS_INT:
        case GT_CNS_LNG:
        case GT_CNS_DBL:
        case GT_CNS_STR:
        case GT_MEMORYBARRIER:
        case GT_JMP:
        case GT_JCC:
        case GT_SETCC:
        case GT_NO_OP:
        case GT_START_NONGC:
        case GT_START_PREEMPTGC:
        case GT_PROF_HOOK:
#if !defined(FEATURE_EH_FUNCLETS)
        case GT_END_LFIN:
#endif // !FEATURE_EH_FUNCLETS
        case GT_PHI_ARG:
        case GT_JMPTABLE:
        case GT_CLS_VAR:
        case GT_CLS_VAR_ADDR:
        case GT_ARGPLACE:
        case GT_PHYSREG:
        case GT_EMITNOP:
        case GT_PINVOKE_PROLOG:
        case GT_PINVOKE_EPILOG:
        case GT_IL_OFFSET:
            return;

        // Unary operators with an optional operand
        case GT_NOP:
        case GT_FIELD:
        case GT_RETURN:
        case GT_RETFILT:
            if (this->AsUnOp()->gtOp1 == nullptr)
            {
                return;
            }
            FALLTHROUGH;

        // Standard unary operators
        case GT_STORE_LCL_VAR:
        case GT_STORE_LCL_FLD:
        case GT_NOT:
        case GT_NEG:
        case GT_BSWAP:
        case GT_BSWAP16:
        case GT_COPY:
        case GT_RELOAD:
        case GT_ARR_LENGTH:
        case GT_CAST:
        case GT_BITCAST:
        case GT_CKFINITE:
        case GT_LCLHEAP:
        case GT_ADDR:
        case GT_IND:
        case GT_OBJ:
        case GT_BLK:
        case GT_BOX:
        case GT_ALLOCOBJ:
        case GT_INIT_VAL:
        case GT_RUNTIMELOOKUP:
        case GT_JTRUE:
        case GT_SWITCH:
        case GT_NULLCHECK:
        case GT_PUTARG_REG:
        case GT_PUTARG_STK:
        case GT_PUTARG_TYPE:
#if FEATURE_ARG_SPLIT
        case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
        case GT_RETURNTRAP:
        case GT_KEEPALIVE:
        case GT_INC_SATURATE:
            visitor(this->AsUnOp()->gtOp1);
            return;

        // Variadic nodes
#if defined(FEATURE_SIMD) ||
defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif for (GenTree* operand : this->AsMultiOp()->Operands()) { if (visitor(operand) == VisitResult::Abort) { break; } } return; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Special nodes case GT_PHI: for (GenTreePhi::Use& use : AsPhi()->Uses()) { if (visitor(use.GetNode()) == VisitResult::Abort) { break; } } return; case GT_FIELD_LIST: for (GenTreeFieldList::Use& field : AsFieldList()->Uses()) { if (visitor(field.GetNode()) == VisitResult::Abort) { break; } } return; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg(); if (visitor(cmpXchg->gtOpLocation) == VisitResult::Abort) { return; } if (visitor(cmpXchg->gtOpValue) == VisitResult::Abort) { return; } visitor(cmpXchg->gtOpComparand); return; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = this->AsArrElem(); if (visitor(arrElem->gtArrObj) == VisitResult::Abort) { return; } for (unsigned i = 0; i < arrElem->gtArrRank; i++) { if (visitor(arrElem->gtArrInds[i]) == VisitResult::Abort) { return; } } return; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = this->AsArrOffs(); if (visitor(arrOffs->gtOffset) == VisitResult::Abort) { return; } if (visitor(arrOffs->gtIndex) == VisitResult::Abort) { return; } visitor(arrOffs->gtArrObj); return; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk(); if (visitor(dynBlock->gtOp1) == VisitResult::Abort) { return; } if (visitor(dynBlock->gtOp2) == VisitResult::Abort) { return; } visitor(dynBlock->gtDynamicSize); return; } case GT_CALL: { GenTreeCall* const call = this->AsCall(); if ((call->gtCallThisArg != nullptr) && (visitor(call->gtCallThisArg->GetNode()) == VisitResult::Abort)) { return; } for (GenTreeCall::Use& use : call->Args()) { if (visitor(use.GetNode()) == VisitResult::Abort) { return; } } for (GenTreeCall::Use& use : call->LateArgs()) { if (visitor(use.GetNode()) == VisitResult::Abort) { return; } } if (call->gtCallType == CT_INDIRECT) { if ((call->gtCallCookie != nullptr) && (visitor(call->gtCallCookie) == VisitResult::Abort)) { return; } if ((call->gtCallAddr != nullptr) && (visitor(call->gtCallAddr) == VisitResult::Abort)) { return; } } if ((call->gtControlExpr != nullptr)) { visitor(call->gtControlExpr); } return; } // Binary nodes default: assert(this->OperIsBinary()); VisitBinOpOperands<TVisitor>(visitor); return; } } template <typename TVisitor> void GenTree::VisitBinOpOperands(TVisitor visitor) { assert(this->OperIsBinary()); GenTreeOp* const op = this->AsOp(); GenTree* const op1 = op->gtOp1; if ((op1 != nullptr) && (visitor(op1) == VisitResult::Abort)) { return; } GenTree* const op2 = op->gtOp2; if (op2 != nullptr) { visitor(op2); } } /***************************************************************************** * operator new * * Note that compiler's allocator is an arena allocator that returns memory that is * not zero-initialized and can contain data from a prior allocation lifetime. 
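 *
 *  Illustrative consequence (restating the note above, not a new rule):
 *  callers must initialize every field they later read; nothing in these
 *  operators zeroes the returned block.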
*/ inline void* __cdecl operator new(size_t sz, Compiler* compiler, CompMemKind cmk) { return compiler->getAllocator(cmk).allocate<char>(sz); } inline void* __cdecl operator new[](size_t sz, Compiler* compiler, CompMemKind cmk) { return compiler->getAllocator(cmk).allocate<char>(sz); } inline void* __cdecl operator new(size_t sz, void* p, const jitstd::placement_t& /* syntax_difference */) { return p; } /*****************************************************************************/ #ifdef DEBUG inline void printRegMask(regMaskTP mask) { printf(REG_MASK_ALL_FMT, mask); } inline char* regMaskToString(regMaskTP mask, Compiler* context) { const size_t cchRegMask = 24; char* regmask = new (context, CMK_Unknown) char[cchRegMask]; sprintf_s(regmask, cchRegMask, REG_MASK_ALL_FMT, mask); return regmask; } inline void printRegMaskInt(regMaskTP mask) { printf(REG_MASK_INT_FMT, (mask & RBM_ALLINT)); } inline char* regMaskIntToString(regMaskTP mask, Compiler* context) { const size_t cchRegMask = 24; char* regmask = new (context, CMK_Unknown) char[cchRegMask]; sprintf_s(regmask, cchRegMask, REG_MASK_INT_FMT, (mask & RBM_ALLINT)); return regmask; } #endif // DEBUG inline static bool StructHasOverlappingFields(DWORD attribs) { return ((attribs & CORINFO_FLG_OVERLAPPING_FIELDS) != 0); } inline static bool StructHasCustomLayout(DWORD attribs) { return ((attribs & CORINFO_FLG_CUSTOMLAYOUT) != 0); } inline static bool StructHasDontDigFieldsFlagSet(DWORD attribs) { return ((attribs & CORINFO_FLG_DONT_DIG_FIELDS) != 0); } //------------------------------------------------------------------------------ // DEBUG_DESTROY_NODE: sets value of tree to garbage to catch extra references // // Arguments: // tree: This node should not be referenced by anyone now // inline void DEBUG_DESTROY_NODE(GenTree* tree) { #ifdef DEBUG // printf("DEBUG_DESTROY_NODE for [0x%08x]\n", tree); // Save gtOper in case we want to find out what this node was tree->gtOperSave = tree->gtOper; tree->gtType = TYP_UNDEF; tree->gtFlags |= ~GTF_NODE_MASK; if (tree->OperIsSimple()) { tree->AsOp()->gtOp1 = tree->AsOp()->gtOp2 = nullptr; } // Must do this last, because the "AsOp()" check above will fail otherwise. // Don't call SetOper, because GT_COUNT is not a valid value tree->gtOper = GT_COUNT; #endif } //------------------------------------------------------------------------------ // DEBUG_DESTROY_NODE: sets value of trees to garbage to catch extra references // // Arguments: // tree, ...rest: These nodes should not be referenced by anyone now // template <typename... T> void DEBUG_DESTROY_NODE(GenTree* tree, T... rest) { DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(rest...); } //------------------------------------------------------------------------------ // lvRefCnt: access reference count for this local var // // Arguments: // state: the requestor's expected ref count state; defaults to RCS_NORMAL // // Return Value: // Ref count for the local. 
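//
//    Example (illustrative): a local with lvImplicitlyReferenced set and a
//    stored count of zero reports a count of 1, so implicitly referenced
//    locals are never treated as unreferenced.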
inline unsigned short LclVarDsc::lvRefCnt(RefCountState state) const
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    if (lvImplicitlyReferenced && (m_lvRefCnt == 0))
    {
        return 1;
    }

    return m_lvRefCnt;
}

//------------------------------------------------------------------------------
// incLvRefCnt: increment reference count for this local var
//
// Arguments:
//    delta: the amount of the increment
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    It is currently the caller's responsibility to ensure this increment
//    will not cause overflow.

inline void LclVarDsc::incLvRefCnt(unsigned short delta, RefCountState state)
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    unsigned short oldRefCnt = m_lvRefCnt;
    m_lvRefCnt += delta;
    assert(m_lvRefCnt >= oldRefCnt);
}

//------------------------------------------------------------------------------
// setLvRefCnt: set the reference count for this local var
//
// Arguments:
//    newValue: the desired new reference count
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    Generally after calling v->setLvRefCnt(Y), v->lvRefCnt() == Y.
//    However this may not be true when v->lvImplicitlyReferenced == 1.

inline void LclVarDsc::setLvRefCnt(unsigned short newValue, RefCountState state)
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    m_lvRefCnt = newValue;
}

//------------------------------------------------------------------------------
// lvRefCntWtd: access weighted reference count for this local var
//
// Arguments:
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Return Value:
//    Weighted ref count for the local.

inline weight_t LclVarDsc::lvRefCntWtd(RefCountState state) const
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    if (lvImplicitlyReferenced && (m_lvRefCntWtd == 0))
    {
        return BB_UNITY_WEIGHT;
    }

    return m_lvRefCntWtd;
}

//------------------------------------------------------------------------------
// incLvRefCntWtd: increment weighted reference count for this local var
//
// Arguments:
//    delta: the amount of the increment
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    It is currently the caller's responsibility to ensure this increment
//    will not cause overflow.

inline void LclVarDsc::incLvRefCntWtd(weight_t delta, RefCountState state)
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    weight_t oldRefCntWtd = m_lvRefCntWtd;
    m_lvRefCntWtd += delta;
    assert(m_lvRefCntWtd >= oldRefCntWtd);
}

//------------------------------------------------------------------------------
// setLvRefCntWtd: set the weighted reference count for this local var
//
// Arguments:
//    newValue: the desired new weighted reference count
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    Generally after calling v->setLvRefCntWtd(Y), v->lvRefCntWtd() == Y.
//    However this may not be true when v->lvImplicitlyReferenced == 1.
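//
//    Example (illustrative): after v->setLvRefCntWtd(0), v->lvRefCntWtd()
//    still reports BB_UNITY_WEIGHT when v->lvImplicitlyReferenced == 1, per
//    the accessor above.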
inline void LclVarDsc::setLvRefCntWtd(weight_t newValue, RefCountState state) { #if defined(DEBUG) assert(state != RCS_INVALID); Compiler* compiler = JitTls::GetCompiler(); assert(compiler->lvaRefCountState == state); #endif m_lvRefCntWtd = newValue; } //------------------------------------------------------------------------------ // compCanHavePatchpoints: return true if patchpoints are supported in this // method. // // Arguments: // reason - [out, optional] reason why patchpoints are not supported // // Returns: // True if patchpoints are supported in this method. // inline bool Compiler::compCanHavePatchpoints(const char** reason) { const char* whyNot = nullptr; #ifdef FEATURE_ON_STACK_REPLACEMENT if (compLocallocSeen) { whyNot = "OSR can't handle localloc"; } else if (compHasBackwardJumpInHandler) { whyNot = "OSR can't handle loop in handler"; } else if (opts.IsReversePInvoke()) { whyNot = "OSR can't handle reverse pinvoke"; } #else whyNot = "OSR feature not defined in build"; #endif if (reason != nullptr) { *reason = whyNot; } return whyNot == nullptr; } /*****************************************************************************/ #endif //_COMPILER_HPP_ /*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Inline functions XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef _COMPILER_HPP_ #define _COMPILER_HPP_ #include "emit.h" // for emitter::emitAddLabel #include "bitvec.h" #include "compilerbitsettraits.hpp" /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Miscellaneous utility functions. Some of these are defined in Utils.cpp XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /*****************************************************************************/ inline bool getInlinePInvokeEnabled() { #ifdef DEBUG return JitConfig.JitPInvokeEnabled() && !JitConfig.StressCOMCall(); #else return true; #endif } inline bool getInlinePInvokeCheckEnabled() { #ifdef DEBUG return JitConfig.JitPInvokeCheckEnabled() != 0; #else return false; #endif } // Enforce float narrowing for buggy compilers (notably preWhidbey VC) inline float forceCastToFloat(double d) { Volatile<float> f = (float)d; return f; } // Enforce UInt32 narrowing for buggy compilers (notably Whidbey Beta 2 LKG) inline UINT32 forceCastToUInt32(double d) { Volatile<UINT32> u = (UINT32)d; return u; } enum RoundLevel { ROUND_NEVER = 0, // Never round ROUND_CMP_CONST = 1, // Round values compared against constants ROUND_CMP = 2, // Round comparands and return values ROUND_ALWAYS = 3, // Round always COUNT_ROUND_LEVEL, DEFAULT_ROUND_LEVEL = ROUND_NEVER }; inline RoundLevel getRoundFloatLevel() { #ifdef DEBUG return (RoundLevel)JitConfig.JitRoundFloat(); #else return DEFAULT_ROUND_LEVEL; #endif } /*****************************************************************************/ /***************************************************************************** * * Return the lowest bit that is set */ template <typename T> inline T genFindLowestBit(T value) { return (value & (0 - value)); } /*****************************************************************************/ /***************************************************************************** * * Return the highest bit that is set (that is, a mask that includes just the highest bit). * TODO-ARM64-Throughput: we should convert these to use the _BitScanReverse() / _BitScanReverse64() * compiler intrinsics, but our CRT header file intrin.h doesn't define these for ARM64 yet. 
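 *
 *  Illustrative example: genFindHighestBit(0x14) == 0x10, since the highest
 *  set bit of 0b10100 is 0b10000.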
 */

inline unsigned int genFindHighestBit(unsigned int mask)
{
    assert(mask != 0);
    unsigned int bit = 1U << ((sizeof(unsigned int) * 8) - 1); // start looking at the top
    while ((bit & mask) == 0)
    {
        bit >>= 1;
    }
    return bit;
}

inline unsigned __int64 genFindHighestBit(unsigned __int64 mask)
{
    assert(mask != 0);
    unsigned __int64 bit = 1ULL << ((sizeof(unsigned __int64) * 8) - 1); // start looking at the top
    while ((bit & mask) == 0)
    {
        bit >>= 1;
    }
    return bit;
}

#if 0
// TODO-ARM64-Cleanup: These should probably be the implementation, when intrin.h is updated for ARM64
inline unsigned int genFindHighestBit(unsigned int mask)
{
    assert(mask != 0);
    unsigned int index;
    _BitScanReverse(&index, mask);
    return 1L << index;
}

inline unsigned __int64 genFindHighestBit(unsigned __int64 mask)
{
    assert(mask != 0);
    unsigned int index;
    _BitScanReverse64(&index, mask);
    return 1LL << index;
}
#endif // 0

/*****************************************************************************
 *
 *  Return true if the given 64-bit value has exactly zero or one bits set.
 */

template <typename T>
inline bool genMaxOneBit(T value)
{
    return (value & (value - 1)) == 0;
}

/*****************************************************************************
 *
 *  Return true if the given 32-bit value has exactly zero or one bits set.
 */

inline bool genMaxOneBit(unsigned value)
{
    return (value & (value - 1)) == 0;
}

/*****************************************************************************
 *
 *  Return true if the given 64-bit value has exactly one bit set.
 */

template <typename T>
inline bool genExactlyOneBit(T value)
{
    return ((value != 0) && genMaxOneBit(value));
}

/*****************************************************************************
 *
 *  Return true if the given 32-bit value has exactly one bit set.
 */

inline bool genExactlyOneBit(unsigned value)
{
    return ((value != 0) && genMaxOneBit(value));
}

/*****************************************************************************
 *
 *  Given a value that has exactly one bit set, return the position of that
 *  bit, in other words return the logarithm in base 2 of the given value.
 */

inline unsigned genLog2(unsigned value)
{
    return BitPosition(value);
}

// Given an unsigned 64-bit value, returns the lower 32-bits in unsigned format
//
inline unsigned ulo32(unsigned __int64 value)
{
    return static_cast<unsigned>(value);
}

// Given an unsigned 64-bit value, returns the upper 32-bits in unsigned format
//
inline unsigned uhi32(unsigned __int64 value)
{
    return static_cast<unsigned>(value >> 32);
}

/*****************************************************************************
 *
 *  Given a value that has exactly one bit set, return the position of that
 *  bit, in other words return the logarithm in base 2 of the given value.
 */

inline unsigned genLog2(unsigned __int64 value)
{
    unsigned lo32 = ulo32(value);
    unsigned hi32 = uhi32(value);

    if (lo32 != 0)
    {
        assert(hi32 == 0);
        return genLog2(lo32);
    }
    else
    {
        return genLog2(hi32) + 32;
    }
}

/*****************************************************************************
 *
 *  Return the lowest bit that is set in the given register mask.
 */

inline regMaskTP genFindLowestReg(regMaskTP value)
{
    return (regMaskTP)genFindLowestBit(value);
}

/*****************************************************************************
 *
 *  A rather simple routine that counts the number of bits in a given number.
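 *
 *  Illustrative example: genCountBits(0x13) == 3, since 0x13 is 0b10011.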
 */

template <typename T>
inline unsigned genCountBits(T bits)
{
    unsigned cnt = 0;

    while (bits)
    {
        cnt++;
        bits -= genFindLowestBit(bits);
    }

    return cnt;
}

/*****************************************************************************
 *
 *  Given 3 masks value, end, start, returns the bits of value between start
 *  and end (exclusive).
 *
 *  value[bitNum(end) - 1, bitNum(start) + 1]
 */

inline unsigned __int64 BitsBetween(unsigned __int64 value, unsigned __int64 end, unsigned __int64 start)
{
    assert(start != 0);
    assert(start < end);
    assert((start & (start - 1)) == 0);
    assert((end & (end - 1)) == 0);

    return value & ~((start - 1) | start) & // Ones to the left of set bit in the start mask.
           (end - 1);                       // Ones to the right of set bit in the end mask.
}

/*****************************************************************************/

inline bool jitIsScaleIndexMul(size_t val)
{
    switch (val)
    {
        case 1:
        case 2:
        case 4:
        case 8:
            return true;

        default:
            return false;
    }
}

// Returns true iff "val" is a valid addressing mode scale shift amount on
// the target architecture.
inline bool jitIsScaleIndexShift(ssize_t val)
{
    // It happens that this is the right test for all our current targets: x86, x64 and ARM.
    // This test would become target-dependent if we added a new target with a different constraint.
    return 0 < val && val < 4;
}

/*****************************************************************************
 * Returns true if value is between [start..end).
 * The comparison is inclusive of start, exclusive of end.
 */

/* static */
inline bool Compiler::jitIsBetween(unsigned value, unsigned start, unsigned end)
{
    return start <= value && value < end;
}

/*****************************************************************************
 * Returns true if value is between [start..end].
 * The comparison is inclusive of both start and end.
 */

/* static */
inline bool Compiler::jitIsBetweenInclusive(unsigned value, unsigned start, unsigned end)
{
    return start <= value && value <= end;
}

/******************************************************************************************
 * Return the EH descriptor for the given region index.
 */
inline EHblkDsc* Compiler::ehGetDsc(unsigned regionIndex)
{
    assert(regionIndex < compHndBBtabCount);
    return &compHndBBtab[regionIndex];
}

/******************************************************************************************
 * Return the EH descriptor index of the enclosing try, for the given region index.
 */
inline unsigned Compiler::ehGetEnclosingTryIndex(unsigned regionIndex)
{
    return ehGetDsc(regionIndex)->ebdEnclosingTryIndex;
}

/******************************************************************************************
 * Return the EH descriptor index of the enclosing handler, for the given region index.
 */
inline unsigned Compiler::ehGetEnclosingHndIndex(unsigned regionIndex)
{
    return ehGetDsc(regionIndex)->ebdEnclosingHndIndex;
}

/******************************************************************************************
 * Return the EH index given a region descriptor.
 */
inline unsigned Compiler::ehGetIndex(EHblkDsc* ehDsc)
{
    assert(compHndBBtab <= ehDsc && ehDsc < compHndBBtab + compHndBBtabCount);
    return (unsigned)(ehDsc - compHndBBtab);
}

/******************************************************************************************
 * Return the EH descriptor for the most nested 'try' region this BasicBlock is a member of
 * (or nullptr if this block is not in a 'try' region).
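 * Illustrative note: for a block nested in several 'try' regions, this yields
 * the innermost region's descriptor, via block->getTryIndex().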
 */

inline EHblkDsc* Compiler::ehGetBlockTryDsc(BasicBlock* block)
{
    if (!block->hasTryIndex())
    {
        return nullptr;
    }

    return ehGetDsc(block->getTryIndex());
}

/******************************************************************************************
 * Return the EH descriptor for the most nested filter or handler region this BasicBlock is a member of
 * (or nullptr if this block is not in a filter or handler region).
 */

inline EHblkDsc* Compiler::ehGetBlockHndDsc(BasicBlock* block)
{
    if (!block->hasHndIndex())
    {
        return nullptr;
    }

    return ehGetDsc(block->getHndIndex());
}

#if defined(FEATURE_EH_FUNCLETS)

/*****************************************************************************
 *  Get the FuncInfoDsc for the funclet we are currently generating code for.
 *  This is only valid during codegen.
 *
 */
inline FuncInfoDsc* Compiler::funCurrentFunc()
{
    return funGetFunc(compCurrFuncIdx);
}

/*****************************************************************************
 *  Change which funclet we are currently generating code for.
 *  This is only valid after funclets are created.
 *
 */
inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
{
    assert(fgFuncletsCreated);
    assert(FitsIn<unsigned short>(funcIdx));
    noway_assert(funcIdx < compFuncInfoCount);
    compCurrFuncIdx = (unsigned short)funcIdx;
}

/*****************************************************************************
 *  Get the FuncInfoDsc for the given funclet.
 *  This is only valid after funclets are created.
 *
 */
inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx)
{
    assert(fgFuncletsCreated);
    assert(funcIdx < compFuncInfoCount);
    return &compFuncInfos[funcIdx];
}

/*****************************************************************************
 *  Get the funcIdx for the EH funclet that begins with block.
 *  This is only valid after funclets are created.
 *  It is only valid for blocks marked with BBF_FUNCLET_BEG because
 *  otherwise we would have to do a more expensive check to determine
 *  if this should return the filter funclet or the filter handler funclet.
 *
 */
inline unsigned Compiler::funGetFuncIdx(BasicBlock* block)
{
    assert(fgFuncletsCreated);
    assert(block->bbFlags & BBF_FUNCLET_BEG);

    EHblkDsc*    eh      = ehGetDsc(block->getHndIndex());
    unsigned int funcIdx = eh->ebdFuncIndex;
    if (eh->ebdHndBeg != block)
    {
        // If this is a filter EH clause, but we want the funclet
        // for the filter (not the filter handler), it is the previous one
        noway_assert(eh->HasFilter());
        noway_assert(eh->ebdFilter == block);
        assert(funGetFunc(funcIdx)->funKind == FUNC_HANDLER);
        assert(funGetFunc(funcIdx)->funEHIndex == funGetFunc(funcIdx - 1)->funEHIndex);
        assert(funGetFunc(funcIdx - 1)->funKind == FUNC_FILTER);
        funcIdx--;
    }

    return funcIdx;
}

#else // !FEATURE_EH_FUNCLETS

/*****************************************************************************
 *  Get the FuncInfoDsc for the funclet we are currently generating code for.
 *  This is only valid during codegen.  For non-funclet platforms, this is
 *  always the root function.
 *
 */
inline FuncInfoDsc* Compiler::funCurrentFunc()
{
    return &compFuncInfoRoot;
}

/*****************************************************************************
 *  Change which funclet we are currently generating code for.
 *  This is only valid after funclets are created.
 *
 */
inline void Compiler::funSetCurrentFunc(unsigned funcIdx)
{
    assert(funcIdx == 0);
}

/*****************************************************************************
 *  Get the FuncInfoDsc for the given funclet.
 *  This is only valid after funclets are created.
* */ inline FuncInfoDsc* Compiler::funGetFunc(unsigned funcIdx) { assert(funcIdx == 0); return &compFuncInfoRoot; } /***************************************************************************** * No funclets, so always 0. * */ inline unsigned Compiler::funGetFuncIdx(BasicBlock* block) { return 0; } #endif // !FEATURE_EH_FUNCLETS //------------------------------------------------------------------------------ // genRegNumFromMask : Maps a single register mask to a register number. // // Arguments: // mask - the register mask // // Return Value: // The number of the register contained in the mask. // // Assumptions: // The mask contains one and only one register. inline regNumber genRegNumFromMask(regMaskTP mask) { assert(mask != 0); // Must have one bit set, so can't have a mask of zero /* Convert the mask to a register number */ regNumber regNum = (regNumber)genLog2(mask); /* Make sure we got it right */ assert(genRegMask(regNum) == mask); return regNum; } /***************************************************************************** * * Return the size in bytes of the given type. */ extern const BYTE genTypeSizes[TYP_COUNT]; template <class T> inline unsigned genTypeSize(T value) { assert((unsigned)TypeGet(value) < ArrLen(genTypeSizes)); return genTypeSizes[TypeGet(value)]; } /***************************************************************************** * * Return the "stack slot count" of the given type. * returns 1 for 32-bit types and 2 for 64-bit types. */ extern const BYTE genTypeStSzs[TYP_COUNT]; template <class T> inline unsigned genTypeStSz(T value) { assert((unsigned)TypeGet(value) < ArrLen(genTypeStSzs)); return genTypeStSzs[TypeGet(value)]; } /***************************************************************************** * * Return the number of registers required to hold a value of the given type. */ /***************************************************************************** * * The following function maps a 'precise' type to an actual type as seen * by the VM (for example, 'byte' maps to 'int'). */ extern const BYTE genActualTypes[TYP_COUNT]; template <class T> inline var_types genActualType(T value) { /* Spot check to make certain the table is in synch with the enum */ assert(genActualTypes[TYP_DOUBLE] == TYP_DOUBLE); assert(genActualTypes[TYP_REF] == TYP_REF); assert((unsigned)TypeGet(value) < sizeof(genActualTypes)); return (var_types)genActualTypes[TypeGet(value)]; } /***************************************************************************** * Can this type be passed as a parameter in a register? */ inline bool isRegParamType(var_types type) { #if defined(TARGET_X86) return (type <= TYP_INT || type == TYP_REF || type == TYP_BYREF); #else // !TARGET_X86 return true; #endif // !TARGET_X86 } #if defined(TARGET_AMD64) || defined(TARGET_ARMARCH) /*****************************************************************************/ // Returns true if 'type' is a struct that can be enregistered for call args // or can be returned by value in multiple registers. // if 'type' is not a struct the return value will be false. // // Arguments: // type - the basic jit var_type for the item being queried // typeClass - the handle for the struct when 'type' is TYP_STRUCT // typeSize - Out param (if non-null) is updated with the size of 'type'. 
//    forReturn - this is true when we are asking about a GT_RETURN context;
//                this is false when we are asking about an argument context
//    isVarArg  - whether or not this is a vararg fixed arg or variable argument
//              - if so, on ARM64 Windows, getArgTypeForStruct will ignore HFA types
//    callConv  - the calling convention of the call
//
inline bool Compiler::VarTypeIsMultiByteAndCanEnreg(var_types                type,
                                                    CORINFO_CLASS_HANDLE     typeClass,
                                                    unsigned*                typeSize,
                                                    bool                     forReturn,
                                                    bool                     isVarArg,
                                                    CorInfoCallConvExtension callConv)
{
    bool     result = false;
    unsigned size   = 0;

    if (varTypeIsStruct(type))
    {
        assert(typeClass != nullptr);
        size = info.compCompHnd->getClassSize(typeClass);
        if (forReturn)
        {
            structPassingKind howToReturnStruct;
            type = getReturnTypeForStruct(typeClass, callConv, &howToReturnStruct, size);
        }
        else
        {
            structPassingKind howToPassStruct;
            type = getArgTypeForStruct(typeClass, &howToPassStruct, isVarArg, size);
        }
        if (type != TYP_UNKNOWN)
        {
            result = true;
        }
    }
    else
    {
        size = genTypeSize(type);
    }

    if (typeSize != nullptr)
    {
        *typeSize = size;
    }

    return result;
}
#endif // TARGET_AMD64 || TARGET_ARMARCH

/*****************************************************************************/

#ifdef DEBUG

inline const char* varTypeGCstring(var_types type)
{
    switch (type)
    {
        case TYP_REF:
            return "gcr";
        case TYP_BYREF:
            return "byr";
        default:
            return "non";
    }
}

#endif

/*****************************************************************************/

const char* varTypeName(var_types);

/*****************************************************************************/
// Helpers to pull little-endian values out of a byte stream.

inline unsigned __int8 getU1LittleEndian(const BYTE* ptr)
{
    return *(UNALIGNED unsigned __int8*)ptr;
}

inline unsigned __int16 getU2LittleEndian(const BYTE* ptr)
{
    return GET_UNALIGNED_VAL16(ptr);
}

inline unsigned __int32 getU4LittleEndian(const BYTE* ptr)
{
    return GET_UNALIGNED_VAL32(ptr);
}

inline signed __int8 getI1LittleEndian(const BYTE* ptr)
{
    return *(UNALIGNED signed __int8*)ptr;
}

inline signed __int16 getI2LittleEndian(const BYTE* ptr)
{
    return GET_UNALIGNED_VAL16(ptr);
}

inline signed __int32 getI4LittleEndian(const BYTE* ptr)
{
    return GET_UNALIGNED_VAL32(ptr);
}

inline signed __int64 getI8LittleEndian(const BYTE* ptr)
{
    return GET_UNALIGNED_VAL64(ptr);
}

inline float getR4LittleEndian(const BYTE* ptr)
{
    __int32 val = getI4LittleEndian(ptr);
    return *(float*)&val;
}

inline double getR8LittleEndian(const BYTE* ptr)
{
    __int64 val = getI8LittleEndian(ptr);
    return *(double*)&val;
}

#ifdef DEBUG
const char* genES2str(BitVecTraits* traits, EXPSET_TP set);
const char* refCntWtd2str(weight_t refCntWtd);
#endif

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                          GenTree                                          XX
XX                      Inline functions                                     XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

void* GenTree::operator new(size_t sz, Compiler* comp, genTreeOps oper)
{
    size_t size = GenTree::s_gtNodeSizes[oper];

#if MEASURE_NODE_SIZE
    genNodeSizeStats.genTreeNodeCnt += 1;
    genNodeSizeStats.genTreeNodeSize += size;
    genNodeSizeStats.genTreeNodeActualSize += sz;

    genNodeSizeStatsPerFunc.genTreeNodeCnt += 1;
    genNodeSizeStatsPerFunc.genTreeNodeSize += size;
    genNodeSizeStatsPerFunc.genTreeNodeActualSize += sz;
#endif // MEASURE_NODE_SIZE

    assert(size >= sz);
    return comp->getAllocator(CMK_ASTNode).allocate<char>(size);
}

// GenTree constructor
inline GenTree::GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode))
{
    gtOper     = oper;
    gtType     = type;
    gtFlags    = GTF_EMPTY;
    gtLIRFlags = 0;
#ifdef DEBUG
    gtDebugFlags = GTF_DEBUG_NONE;
#endif // DEBUG
    gtCSEnum = NO_CSE;
    ClearAssertion();

    gtNext = nullptr;
    gtPrev = nullptr;
    SetRegNum(REG_NA);
    INDEBUG(gtRegTag = GT_REGTAG_NONE;)

    INDEBUG(gtCostsInitialized = false;)

#ifdef DEBUG
    size_t size = GenTree::s_gtNodeSizes[oper];
    if (size == TREE_NODE_SZ_SMALL && !largeNode)
    {
        gtDebugFlags |= GTF_DEBUG_NODE_SMALL;
    }
    else if (size == TREE_NODE_SZ_LARGE || largeNode)
    {
        gtDebugFlags |= GTF_DEBUG_NODE_LARGE;
    }
    else
    {
        assert(!"bogus node size");
    }
#endif

#if COUNT_AST_OPERS
    InterlockedIncrement(&s_gtNodeCounts[oper]);
#endif

#ifdef DEBUG
    gtSeqNum = 0;
    gtUseNum = -1;
    gtTreeID = JitTls::GetCompiler()->compGenTreeID++;
    gtVNPair.SetBoth(ValueNumStore::NoVN);
    gtRegTag   = GT_REGTAG_NONE;
    gtOperSave = GT_NONE;
#endif
}

/*****************************************************************************/

inline Statement* Compiler::gtNewStmt(GenTree* expr)
{
    Statement* stmt = new (this->getAllocator(CMK_ASTNode)) Statement(expr DEBUGARG(compStatementID++));
    return stmt;
}

inline Statement* Compiler::gtNewStmt(GenTree* expr, const DebugInfo& di)
{
    Statement* stmt = gtNewStmt(expr);
    stmt->SetDebugInfo(di);
    return stmt;
}

/*****************************************************************************/

inline GenTree* Compiler::gtNewOperNode(genTreeOps oper, var_types type, GenTree* op1, bool doSimplifications)
{
    assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0);
    assert((GenTree::OperKind(oper) & GTK_EXOP) == 0); // Can't use this to construct any types that extend unary/binary operator.
    assert(op1 != nullptr || oper == GT_RETFILT || oper == GT_NOP || (oper == GT_RETURN && type == TYP_VOID));

    if (doSimplifications)
    {
        // We do some simplifications here.
        // If this gets to be too many, try a switch...
        // TODO-Cleanup: With the factoring out of array bounds checks, it should not be the
        // case that we need to check for the array index case here, but without this check
        // we get failures (see for example jit\Directed\Languages\Python\test_methods_d.exe)
        if (oper == GT_IND)
        {
            // IND(ADDR(IND(x))) == IND(x)
            if (op1->gtOper == GT_ADDR)
            {
                GenTreeUnOp* addr  = op1->AsUnOp();
                GenTree*     indir = addr->gtGetOp1();
                if (indir->OperIs(GT_IND) && ((indir->gtFlags & GTF_IND_ARR_INDEX) == 0))
                {
                    op1 = indir->AsIndir()->Addr();
                }
            }
        }
        else if (oper == GT_ADDR)
        {
            // if "x" is not an array index, ADDR(IND(x)) == x
            if (op1->gtOper == GT_IND && (op1->gtFlags & GTF_IND_ARR_INDEX) == 0)
            {
                return op1->AsOp()->gtOp1;
            }
            else
            {
                // Addr source can't be CSE-ed.
                op1->SetDoNotCSE();
            }
        }
    }

    GenTree* node = new (this, oper) GenTreeOp(oper, type, op1, nullptr);

    return node;
}

// Returns an opcode that is of the largest node size in use.
inline genTreeOps LargeOpOpcode()
{
    assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
    return GT_CALL;
}

/******************************************************************************
 *
 * Use to create nodes which may later be morphed to another (big) operator
 */

inline GenTree* Compiler::gtNewLargeOperNode(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
{
    assert((GenTree::OperKind(oper) & (GTK_UNOP | GTK_BINOP)) != 0);
    // Can't use this to construct any types that extend unary/binary operator.
    assert((GenTree::OperKind(oper) & GTK_EXOP) == 0);
    assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL);

    // Allocate a large node
    GenTree* node = new (this, LargeOpOpcode()) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));

    return node;
}

/*****************************************************************************
 *
 *  allocates an integer constant entry that represents a handle (something
 *  that may need to be fixed up).
 */

inline GenTree* Compiler::gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields)
{
    GenTree* node;
    assert((flags & (GTF_ICON_HDL_MASK | GTF_ICON_FIELD_OFF)) != 0);

    // Interpret "fields == NULL" as "not a field."
    if (fields == nullptr)
    {
        fields = FieldSeqStore::NotAField();
    }

#if defined(LATE_DISASM)
    node = new (this, LargeOpOpcode()) GenTreeIntCon(TYP_I_IMPL, value, fields DEBUGARG(/*largeNode*/ true));
#else
    node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, value, fields);
#endif
    node->gtFlags |= flags;
    return node;
}

/*****************************************************************************
 *
 *  It may not be allowed to embed HANDLEs directly into the JITed code (e.g.,
 *  as arguments to JIT helpers). Get a corresponding value that can be embedded.
 *  These are versions for each specific type of HANDLE.
 */

inline GenTree* Compiler::gtNewIconEmbScpHndNode(CORINFO_MODULE_HANDLE scpHnd)
{
    void *embedScpHnd, *pEmbedScpHnd;

    embedScpHnd = (void*)info.compCompHnd->embedModuleHandle(scpHnd, &pEmbedScpHnd);

    assert((!embedScpHnd) != (!pEmbedScpHnd));

    return gtNewIconEmbHndNode(embedScpHnd, pEmbedScpHnd, GTF_ICON_SCOPE_HDL, scpHnd);
}

//-----------------------------------------------------------------------------

inline GenTree* Compiler::gtNewIconEmbClsHndNode(CORINFO_CLASS_HANDLE clsHnd)
{
    void *embedClsHnd, *pEmbedClsHnd;

    embedClsHnd = (void*)info.compCompHnd->embedClassHandle(clsHnd, &pEmbedClsHnd);

    assert((!embedClsHnd) != (!pEmbedClsHnd));

    return gtNewIconEmbHndNode(embedClsHnd, pEmbedClsHnd, GTF_ICON_CLASS_HDL, clsHnd);
}

//-----------------------------------------------------------------------------

inline GenTree* Compiler::gtNewIconEmbMethHndNode(CORINFO_METHOD_HANDLE methHnd)
{
    void *embedMethHnd, *pEmbedMethHnd;

    embedMethHnd = (void*)info.compCompHnd->embedMethodHandle(methHnd, &pEmbedMethHnd);

    assert((!embedMethHnd) != (!pEmbedMethHnd));

    return gtNewIconEmbHndNode(embedMethHnd, pEmbedMethHnd, GTF_ICON_METHOD_HDL, methHnd);
}

//-----------------------------------------------------------------------------

inline GenTree* Compiler::gtNewIconEmbFldHndNode(CORINFO_FIELD_HANDLE fldHnd)
{
    void *embedFldHnd, *pEmbedFldHnd;

    embedFldHnd = (void*)info.compCompHnd->embedFieldHandle(fldHnd, &pEmbedFldHnd);

    assert((!embedFldHnd) != (!pEmbedFldHnd));

    return gtNewIconEmbHndNode(embedFldHnd, pEmbedFldHnd, GTF_ICON_FIELD_HDL, fldHnd);
}

/*****************************************************************************/

//------------------------------------------------------------------------------
// gtNewHelperCallNode : Helper to create a call helper node.
//
//
// Arguments:
//    helper - Call helper
//    type   - Type of the node
//    args   - Call args
//
// Return Value:
//    New CT_HELPER node

inline GenTreeCall* Compiler::gtNewHelperCallNode(unsigned helper, var_types type, GenTreeCall::Use* args)
{
    GenTreeFlags flags = s_helperCallProperties.NoThrow((CorInfoHelpFunc)helper) ? GTF_EMPTY : GTF_EXCEPT;
    GenTreeCall* result = gtNewCallNode(CT_HELPER, eeFindHelper(helper), type, args);
    result->gtFlags |= flags;

#if DEBUG
    // Helper calls are never candidates.
    result->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER;
#endif

    return result;
}

//------------------------------------------------------------------------------
// gtNewRuntimeLookupHelperCallNode : Helper to create a runtime lookup call helper node.
//
//
// Arguments:
//    pRuntimeLookup    - Runtime lookup descriptor; its helper field selects the call helper
//    ctxTree           - Context (runtime lookup context) tree
//    compileTimeHandle - Compile-time handle associated with the lookup
//
// Return Value:
//    New CT_HELPER node

inline GenTreeCall* Compiler::gtNewRuntimeLookupHelperCallNode(CORINFO_RUNTIME_LOOKUP* pRuntimeLookup,
                                                               GenTree*                ctxTree,
                                                               void*                   compileTimeHandle)
{
    GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle);
    GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode);

    return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
}

//------------------------------------------------------------------------
// gtNewAllocObjNode: A little helper to create an object allocation node.
//
// Arguments:
//    helper               - Value returned by ICorJitInfo::getNewHelper
//    helperHasSideEffects - True iff allocation helper has side effects
//    clsHnd               - Corresponding class handle
//    type                 - Tree return type (e.g. TYP_REF)
//    op1                  - Node containing an address of VtablePtr
//
// Return Value:
//    Returns GT_ALLOCOBJ node that will be later morphed into an
//    allocation helper call or local variable allocation on the stack.

inline GenTreeAllocObj* Compiler::gtNewAllocObjNode(
    unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, var_types type, GenTree* op1)
{
    GenTreeAllocObj* node = new (this, GT_ALLOCOBJ) GenTreeAllocObj(type, helper, helperHasSideEffects, clsHnd, op1);
    return node;
}

//------------------------------------------------------------------------
// gtNewRuntimeLookup: Helper to create a runtime lookup node
//
// Arguments:
//    hnd - generic handle being looked up
//    hndTyp - type of the generic handle
//    tree - tree for the lookup
//
// Return Value:
//    New GenTreeRuntimeLookup node.

inline GenTree* Compiler::gtNewRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
{
    assert(tree != nullptr);
    GenTree* node = new (this, GT_RUNTIMELOOKUP) GenTreeRuntimeLookup(hnd, hndTyp, tree);
    return node;
}

//------------------------------------------------------------------------
// gtNewFieldRef: a helper for creating GT_FIELD nodes.
//
// Normalizes struct types (for SIMD vectors). Sets GTF_GLOB_REF for fields
// that may be pointing into globally visible memory.
//
// Arguments:
//    type   - type for the field node
//    fldHnd - the field handle
//    obj    - the instance, an address
//    offset - the field offset
//
// Return Value:
//    The created node.
//
inline GenTreeField* Compiler::gtNewFieldRef(var_types type, CORINFO_FIELD_HANDLE fldHnd, GenTree* obj, DWORD offset)
{
    // GT_FIELD nodes are transformed into GT_IND nodes.
    assert(GenTree::s_gtNodeSizes[GT_IND] <= GenTree::s_gtNodeSizes[GT_FIELD]);

    if (type == TYP_STRUCT)
    {
        CORINFO_CLASS_HANDLE structHnd;
        eeGetFieldType(fldHnd, &structHnd);
        type = impNormStructType(structHnd);
    }

    GenTreeField* fieldNode = new (this, GT_FIELD) GenTreeField(type, obj, fldHnd, offset);

    // If "obj" is the address of a local, note that a field of that struct local has been accessed.
if ((obj != nullptr) && obj->OperIs(GT_ADDR) && varTypeIsStruct(obj->AsUnOp()->gtOp1) && obj->AsUnOp()->gtOp1->OperIs(GT_LCL_VAR)) { LclVarDsc* varDsc = lvaGetDesc(obj->AsUnOp()->gtOp1->AsLclVarCommon()); varDsc->lvFieldAccessed = 1; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // These structs are passed by reference and can easily become global // references if those references are exposed. We clear out // address-exposure information for these parameters when they are // converted into references in fgRetypeImplicitByRefArgs() so we do // not have the necessary information in morph to know if these // indirections are actually global references, so we have to be // conservative here. if (varDsc->lvIsParam) { fieldNode->gtFlags |= GTF_GLOB_REF; } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) } else { fieldNode->gtFlags |= GTF_GLOB_REF; } return fieldNode; } /***************************************************************************** * * A little helper to create an array index node. */ inline GenTree* Compiler::gtNewIndexRef(var_types typ, GenTree* arrayOp, GenTree* indexOp) { GenTreeIndex* gtIndx = new (this, GT_INDEX) GenTreeIndex(typ, arrayOp, indexOp, genTypeSize(typ)); return gtIndx; } //------------------------------------------------------------------------------ // gtNewArrLen : Helper to create an array length node. // // // Arguments: // typ - Type of the node // arrayOp - Array node // lenOffset - Offset of the length field // block - Basic block that will contain the result // // Return Value: // New GT_ARR_LENGTH node inline GenTreeArrLen* Compiler::gtNewArrLen(var_types typ, GenTree* arrayOp, int lenOffset, BasicBlock* block) { GenTreeArrLen* arrLen = new (this, GT_ARR_LENGTH) GenTreeArrLen(typ, arrayOp, lenOffset); static_assert_no_msg(GTF_ARRLEN_NONFAULTING == GTF_IND_NONFAULTING); arrLen->SetIndirExceptionFlags(this); if (block != nullptr) { block->bbFlags |= BBF_HAS_IDX_LEN; } optMethodFlags |= OMF_HAS_ARRAYREF; return arrLen; } //------------------------------------------------------------------------------ // gtNewIndir : Helper to create an indirection node. // // Arguments: // typ - Type of the node // addr - Address of the indirection // // Return Value: // New GT_IND node inline GenTreeIndir* Compiler::gtNewIndir(var_types typ, GenTree* addr) { GenTree* indir = gtNewOperNode(GT_IND, typ, addr); indir->SetIndirExceptionFlags(this); return indir->AsIndir(); } //------------------------------------------------------------------------------ // gtNewNullCheck : Helper to create a null check node. // // Arguments: // addr - Address to null check // basicBlock - Basic block of the node // // Return Value: // New GT_NULLCHECK node inline GenTree* Compiler::gtNewNullCheck(GenTree* addr, BasicBlock* basicBlock) { assert(fgAddrCouldBeNull(addr)); GenTree* nullCheck = gtNewOperNode(GT_NULLCHECK, TYP_BYTE, addr); nullCheck->gtFlags |= GTF_EXCEPT; basicBlock->bbFlags |= BBF_HAS_NULLCHECK; optMethodFlags |= OMF_HAS_NULLCHECK; return nullCheck; } /***************************************************************************** * * Create (and check for) a "nothing" node, i.e. a node that doesn't produce * any code. We currently use a "nop" node of type void for this purpose. 
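 *
 *  Illustrative use: gtUnusedValNode below wraps an expression as
 *  GT_COMMA(expr, nothing) so the expression's value can be discarded.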
*/ inline GenTree* Compiler::gtNewNothingNode() { return new (this, GT_NOP) GenTreeOp(GT_NOP, TYP_VOID); } /*****************************************************************************/ inline bool GenTree::IsNothingNode() const { return (gtOper == GT_NOP && gtType == TYP_VOID); } /***************************************************************************** * * Change the given node to a NOP - May be later changed to a GT_COMMA * *****************************************************************************/ inline void GenTree::gtBashToNOP() { ChangeOper(GT_NOP); gtType = TYP_VOID; AsOp()->gtOp1 = AsOp()->gtOp2 = nullptr; gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); } // return new arg placeholder node. Does not do anything but has a type associated // with it so we can keep track of register arguments in lists associated w/ call nodes inline GenTree* Compiler::gtNewArgPlaceHolderNode(var_types type, CORINFO_CLASS_HANDLE clsHnd) { GenTree* node = new (this, GT_ARGPLACE) GenTreeArgPlace(type, clsHnd); return node; } /*****************************************************************************/ inline GenTree* Compiler::gtUnusedValNode(GenTree* expr) { return gtNewOperNode(GT_COMMA, TYP_VOID, expr, gtNewNothingNode()); } /***************************************************************************** * * A wrapper for gtSetEvalOrder and gtComputeFPlvls * Necessary because the FP levels may need to be re-computed if we reverse * operands */ inline void Compiler::gtSetStmtInfo(Statement* stmt) { GenTree* expr = stmt->GetRootNode(); /* Recursively process the expression */ gtSetEvalOrder(expr); } /*****************************************************************************/ inline void Compiler::fgUpdateConstTreeValueNumber(GenTree* tree) { if (vnStore != nullptr) { fgValueNumberTreeConst(tree); } } inline void GenTree::SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate) { assert(((gtDebugFlags & GTF_DEBUG_NODE_SMALL) != 0) != ((gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0)); /* Make sure the node isn't too small for the new operator */ assert(GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[gtOper] == TREE_NODE_SZ_LARGE); assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_LARGE); assert(GenTree::s_gtNodeSizes[oper] == TREE_NODE_SZ_SMALL || (gtDebugFlags & GTF_DEBUG_NODE_LARGE)); #if defined(HOST_64BIT) && !defined(TARGET_64BIT) if (gtOper == GT_CNS_LNG && oper == GT_CNS_INT) { // When casting from LONG to INT, we need to force cast of the value, // if the host architecture represents INT and LONG with the same data size. AsLngCon()->gtLconVal = (INT64)(INT32)AsLngCon()->gtLconVal; } #endif // defined(HOST_64BIT) && !defined(TARGET_64BIT) SetOperRaw(oper); #ifdef DEBUG // Maintain the invariant that unary operators always have NULL gtOp2. // If we ever start explicitly allocating GenTreeUnOp nodes, we wouldn't be // able to do that (but if we did, we'd have to have a check in GetOp() -- perhaps // a gtUnOp...) if (OperKind(oper) == GTK_UNOP) { AsOp()->gtOp2 = nullptr; } #endif // DEBUG #if DEBUGGABLE_GENTREE // Until we eliminate SetOper/ChangeOper, we also change the vtable of the node, so that // it shows up correctly in the debugger. SetVtableForOper(oper); #endif // DEBUGGABLE_GENTREE if (vnUpdate == CLEAR_VN) { // Clear the ValueNum field as well. gtVNPair.SetBoth(ValueNumStore::NoVN); } // Do "oper"-specific initializations. TODO-Cleanup: these are too ad-hoc to be reliable. 
// The bashing code should decide itself what to initialize and what to leave as it was. switch (oper) { case GT_CNS_INT: AsIntCon()->gtFieldSeq = FieldSeqStore::NotAField(); break; #if defined(TARGET_ARM) case GT_MUL_LONG: // We sometimes bash GT_MUL to GT_MUL_LONG, which converts it from GenTreeOp to GenTreeMultiRegOp. AsMultiRegOp()->gtOtherReg = REG_NA; AsMultiRegOp()->ClearOtherRegFlags(); break; #endif case GT_LCL_FLD: AsLclFld()->SetLclOffs(0); AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); break; default: break; } } inline GenTree* Compiler::gtNewKeepAliveNode(GenTree* op) { GenTree* keepalive = gtNewOperNode(GT_KEEPALIVE, TYP_VOID, op); // Prevent both reordering and removal. Invalid optimizations of GC.KeepAlive are // very subtle and hard to observe. Thus we are conservatively marking it with both // GTF_CALL and GTF_GLOB_REF side-effects even though it may be more than strictly // necessary. The conservative side-effects are unlikely to have negative impact // on code quality in this case. keepalive->gtFlags |= (GTF_CALL | GTF_GLOB_REF); return keepalive; } inline GenTreeCast* Compiler::gtNewCastNode(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType) { GenTreeCast* cast = new (this, GT_CAST) GenTreeCast(typ, op1, fromUnsigned, castType); return cast; } inline GenTreeCast* Compiler::gtNewCastNodeL(var_types typ, GenTree* op1, bool fromUnsigned, var_types castType) { /* Some casts get transformed into 'GT_CALL' or 'GT_IND' nodes */ assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_CAST]); assert(GenTree::s_gtNodeSizes[GT_CALL] >= GenTree::s_gtNodeSizes[GT_IND]); /* Make a big node first and then change it to be GT_CAST */ GenTreeCast* cast = new (this, LargeOpOpcode()) GenTreeCast(typ, op1, fromUnsigned, castType DEBUGARG(/*largeNode*/ true)); return cast; } inline GenTreeIndir* Compiler::gtNewMethodTableLookup(GenTree* object) { GenTreeIndir* result = gtNewIndir(TYP_I_IMPL, object); result->gtFlags |= GTF_IND_INVARIANT; return result; } /*****************************************************************************/ /*****************************************************************************/ inline void GenTree::SetOperRaw(genTreeOps oper) { // Please do not do anything here other than assign to gtOper (debug-only // code is OK, but should be kept to a minimum). RecordOperBashing(OperGet(), oper); // nop unless NODEBASH_STATS is enabled // Bashing to MultiOp nodes is not currently supported. assert(!OperIsMultiOp(oper)); gtOper = oper; } inline void GenTree::SetOperResetFlags(genTreeOps oper) { SetOper(oper); gtFlags &= GTF_NODE_MASK; } inline void GenTree::ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate) { assert(!OperIsConst(oper)); // use BashToConst() instead GenTreeFlags mask = GTF_COMMON_MASK; if (this->OperIsIndirOrArrLength() && OperIsIndirOrArrLength(oper)) { mask |= GTF_IND_NONFAULTING; } SetOper(oper, vnUpdate); gtFlags &= mask; } inline void GenTree::ChangeOperUnchecked(genTreeOps oper) { GenTreeFlags mask = GTF_COMMON_MASK; if (this->OperIsIndirOrArrLength() && OperIsIndirOrArrLength(oper)) { mask |= GTF_IND_NONFAULTING; } SetOperRaw(oper); // Trust the caller and don't use SetOper() gtFlags &= mask; } //------------------------------------------------------------------------ // BashToConst: Bash the node to a constant one. // // The function will infer the node's new oper from the type: GT_CNS_INT // or GT_CNS_LNG for integers and GC types, GT_CNS_DBL for floats/doubles. 
// // The type is inferred from "value"'s type ("T") unless an explicit // one is provided via the second argument, in which case it is checked // for compatibility with "value". So, e. g., "BashToConst(0)" will bash // to GT_CNS_INT, type TYP_INT, "BashToConst(0, TYP_REF)" will bash to the // canonical "null" node, but "BashToConst(0.0, TYP_INT)" will assert. // // Arguments: // value - Value which the bashed constant will have // type - Type the bashed node will have // template <typename T> void GenTree::BashToConst(T value, var_types type /* = TYP_UNDEF */) { static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, long long>::value || std::is_same<T, float>::value || std::is_same<T, double>::value)); static_assert_no_msg(sizeof(int64_t) == sizeof(long long)); var_types typeOfValue = TYP_UNDEF; if (std::is_floating_point<T>::value) { assert((type == TYP_UNDEF) || varTypeIsFloating(type)); typeOfValue = std::is_same<T, float>::value ? TYP_FLOAT : TYP_DOUBLE; } else { assert((type == TYP_UNDEF) || varTypeIsIntegral(type) || varTypeIsGC(type)); typeOfValue = std::is_same<T, int32_t>::value ? TYP_INT : TYP_LONG; } if (type == TYP_UNDEF) { type = typeOfValue; } assert(type == genActualType(type)); genTreeOps oper = GT_NONE; if (varTypeIsFloating(type)) { oper = GT_CNS_DBL; } else { oper = (type == TYP_LONG) ? GT_CNS_NATIVELONG : GT_CNS_INT; } SetOperResetFlags(oper); gtType = type; switch (oper) { case GT_CNS_INT: #if !defined(TARGET_64BIT) assert(type != TYP_LONG); #endif assert(varTypeIsIntegral(type) || varTypeIsGC(type)); if (genTypeSize(type) <= genTypeSize(TYP_INT)) { assert(FitsIn<int32_t>(value)); } AsIntCon()->SetIconValue(static_cast<ssize_t>(value)); AsIntCon()->gtFieldSeq = FieldSeqStore::NotAField(); break; #if !defined(TARGET_64BIT) case GT_CNS_LNG: assert(type == TYP_LONG); AsLngCon()->SetLngValue(static_cast<int64_t>(value)); break; #endif case GT_CNS_DBL: assert(varTypeIsFloating(type)); AsDblCon()->gtDconVal = static_cast<double>(value); break; default: unreached(); } } //------------------------------------------------------------------------ // BashToZeroConst: Bash the node to a constant representing "zero" of "type". // // Arguments: // type - Type the bashed node will have, currently only integers, // GC types and floating point types are supported. // inline void GenTree::BashToZeroConst(var_types type) { if (varTypeIsFloating(type)) { BashToConst(0.0, type); } else { assert(varTypeIsIntegral(type) || varTypeIsGC(type)); // "genActualType" so that we do not create CNS_INT(small type). BashToConst(0, genActualType(type)); } } /***************************************************************************** * * Returns true if the node is of the "ovf" variety, for example, add.ovf.i1. * + gtOverflow() can only be called for valid operators (that is, we know it is one * of the operators which may have GTF_OVERFLOW set). * + gtOverflowEx() is more expensive, and should be called only if gtOper may be * an operator for which GTF_OVERFLOW is invalid. 
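 *
 *  Illustrative example: a GT_ADD imported from add.ovf carries GTF_OVERFLOW,
 *  so gtOverflow() returns true for it; a plain add returns false.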
*/

inline bool GenTree::gtOverflow() const
{
    assert(OperMayOverflow());

    if ((gtFlags & GTF_OVERFLOW) != 0)
    {
        assert(varTypeIsIntegral(TypeGet()));

        return true;
    }
    else
    {
        return false;
    }
}

inline bool GenTree::gtOverflowEx() const
{
    return OperMayOverflow() && gtOverflow();
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                          LclVarsInfo                                      XX
XX                          Inline functions                                 XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

inline bool Compiler::lvaHaveManyLocals() const
{
    return (lvaCount >= (unsigned)JitConfig.JitMaxLocalsToTrack());
}

/*****************************************************************************
 *
 *  Allocate a temporary variable or a set of temp variables.
 */

inline unsigned Compiler::lvaGrabTemp(bool shortLifetime DEBUGARG(const char* reason))
{
    if (compIsForInlining())
    {
        // Grab the temp using Inliner's Compiler instance.
        Compiler* pComp = impInlineInfo->InlinerCompiler; // The Compiler instance for the caller (i.e. the inliner)

        if (pComp->lvaHaveManyLocals())
        {
            // Don't create more LclVar with inlining
            compInlineResult->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS);
        }

        unsigned tmpNum = pComp->lvaGrabTemp(shortLifetime DEBUGARG(reason));
        lvaTable        = pComp->lvaTable;
        lvaCount        = pComp->lvaCount;
        lvaTableCnt     = pComp->lvaTableCnt;
        return tmpNum;
    }

    // You cannot allocate more space after frame layout!
    noway_assert(lvaDoneFrameLayout < Compiler::TENTATIVE_FRAME_LAYOUT);

    /* Check if the lvaTable has to be grown */
    if (lvaCount + 1 > lvaTableCnt)
    {
        unsigned newLvaTableCnt = lvaCount + (lvaCount / 2) + 1;

        // Check for overflow
        if (newLvaTableCnt <= lvaCount)
        {
            IMPL_LIMITATION("too many locals");
        }

        LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt);

        memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable));
        memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
        for (unsigned i = lvaCount; i < newLvaTableCnt; i++)
        {
            new (&newLvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor.
        }

#ifdef DEBUG
        // Fill the old table with junk to help detect unintended reuse.
        memset(lvaTable, JitConfig.JitDefaultFill(), lvaCount * sizeof(*lvaTable));
#endif

        lvaTableCnt = newLvaTableCnt;
        lvaTable    = newLvaTable;
    }

    const unsigned tempNum = lvaCount;
    lvaCount++;

    // Initialize lvType, lvIsTemp and lvOnFrame
    lvaTable[tempNum].lvType    = TYP_UNDEF;
    lvaTable[tempNum].lvIsTemp  = shortLifetime;
    lvaTable[tempNum].lvOnFrame = true;

    // If we've started normal ref counting, bump the ref count of this
    // local, as we no longer do any incremental counting, and we presume
    // this new local will be referenced.
    if (lvaLocalVarRefCounted())
    {
        if (opts.OptimizationDisabled())
        {
            lvaTable[tempNum].lvImplicitlyReferenced = 1;
        }
        else
        {
            lvaTable[tempNum].setLvRefCnt(1);
            lvaTable[tempNum].setLvRefCntWtd(BB_UNITY_WEIGHT);
        }
    }

#ifdef DEBUG
    lvaTable[tempNum].lvReason = reason;

    if (verbose)
    {
        printf("\nlvaGrabTemp returning %d (", tempNum);
        gtDispLclVar(tempNum, false);
        printf(")%s called for %s.\n", shortLifetime ? "" : " (a long lifetime temp)", reason);
    }
#endif // DEBUG

    return tempNum;
}

inline unsigned Compiler::lvaGrabTemps(unsigned cnt DEBUGARG(const char* reason))
{
    if (compIsForInlining())
    {
        // Grab the temps using Inliner's Compiler instance.
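        // Note: the allocation happens in the inliner's lvaTable, which may grow or be
        // reallocated, so the locally cached table fields are refreshed right after the call.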
        unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTemps(cnt DEBUGARG(reason));
        lvaTable        = impInlineInfo->InlinerCompiler->lvaTable;
        lvaCount        = impInlineInfo->InlinerCompiler->lvaCount;
        lvaTableCnt     = impInlineInfo->InlinerCompiler->lvaTableCnt;
        return tmpNum;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nlvaGrabTemps(%d) returning %d..%d (long lifetime temps) called for %s", cnt, lvaCount,
               lvaCount + cnt - 1, reason);
    }
#endif

    // Could handle this...
    assert(!lvaLocalVarRefCounted());

    // You cannot allocate more space after frame layout!
    noway_assert(lvaDoneFrameLayout < Compiler::TENTATIVE_FRAME_LAYOUT);

    /* Check if the lvaTable has to be grown */
    if (lvaCount + cnt > lvaTableCnt)
    {
        unsigned newLvaTableCnt = lvaCount + max(lvaCount / 2 + 1, cnt);

        // Check for overflow
        if (newLvaTableCnt <= lvaCount)
        {
            IMPL_LIMITATION("too many locals");
        }

        LclVarDsc* newLvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(newLvaTableCnt);

        memcpy(newLvaTable, lvaTable, lvaCount * sizeof(*lvaTable));
        memset(newLvaTable + lvaCount, 0, (newLvaTableCnt - lvaCount) * sizeof(*lvaTable));
        for (unsigned i = lvaCount; i < newLvaTableCnt; i++)
        {
            new (&newLvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor.
        }

#ifdef DEBUG
        // Fill the old table with junk to help detect unintended reuse.
        memset(lvaTable, JitConfig.JitDefaultFill(), lvaCount * sizeof(*lvaTable));
#endif

        lvaTableCnt = newLvaTableCnt;
        lvaTable    = newLvaTable;
    }

    unsigned tempNum = lvaCount;

    while (cnt--)
    {
        lvaTable[lvaCount].lvType    = TYP_UNDEF; // Initialize lvType, lvIsTemp and lvOnFrame
        lvaTable[lvaCount].lvIsTemp  = false;
        lvaTable[lvaCount].lvOnFrame = true;
        lvaCount++;
    }

    return tempNum;
}

/*****************************************************************************
 *
 *  Allocate a temporary variable which is implicitly used by code-gen
 *  There will be no explicit references to the temp, and so it needs to
 *  be forced to be kept alive, and not be optimized away.
 */

inline unsigned Compiler::lvaGrabTempWithImplicitUse(bool shortLifetime DEBUGARG(const char* reason))
{
    if (compIsForInlining())
    {
        // Grab the temp using Inliner's Compiler instance.
        unsigned tmpNum = impInlineInfo->InlinerCompiler->lvaGrabTempWithImplicitUse(shortLifetime DEBUGARG(reason));

        lvaTable    = impInlineInfo->InlinerCompiler->lvaTable;
        lvaCount    = impInlineInfo->InlinerCompiler->lvaCount;
        lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt;
        return tmpNum;
    }

    unsigned lclNum = lvaGrabTemp(shortLifetime DEBUGARG(reason));

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    // Note the implicit use
    varDsc->lvImplicitlyReferenced = 1;

    return lclNum;
}

/*****************************************************************************
 *
 *  Increment the ref counts for a local variable
 */

inline void LclVarDsc::incRefCnts(weight_t weight, Compiler* comp, RefCountState state, bool propagate)
{
    // In minopts and debug codegen, we don't maintain normal ref counts.
    if ((state == RCS_NORMAL) && !comp->PreciseRefCountsRequired())
    {
        // Note, at least, that there is at least one reference.
        lvImplicitlyReferenced = 1;
        return;
    }

    Compiler::lvaPromotionType promotionType = DUMMY_INIT(Compiler::PROMOTION_TYPE_NONE);
    if (varTypeIsStruct(lvType))
    {
        promotionType = comp->lvaGetPromotionType(this);
    }

    //
    // Increment counts on the local itself.
// if ((lvType != TYP_STRUCT) || (promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT)) { // We increment ref counts of this local for primitive types, including structs that have been retyped as their // only field, as well as for structs whose fields are not independently promoted. // // Increment lvRefCnt // int newRefCnt = lvRefCnt(state) + 1; if (newRefCnt == (unsigned short)newRefCnt) // lvRefCnt is an "unsigned short". Don't overflow it. { setLvRefCnt((unsigned short)newRefCnt, state); } // // Increment lvRefCntWtd // if (weight != 0) { // We double the weight of internal temps bool doubleWeight = lvIsTemp; #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // and, for the time being, implicit byref params doubleWeight |= lvIsImplicitByRef; #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) if (doubleWeight && (weight * 2 > weight)) { weight *= 2; } weight_t newWeight = lvRefCntWtd(state) + weight; assert(newWeight >= lvRefCntWtd(state)); setLvRefCntWtd(newWeight, state); } } if (varTypeIsStruct(lvType) && propagate) { // For promoted struct locals, increment lvRefCnt on its field locals as well. if (promotionType == Compiler::PROMOTION_TYPE_INDEPENDENT || promotionType == Compiler::PROMOTION_TYPE_DEPENDENT) { for (unsigned i = lvFieldLclStart; i < lvFieldLclStart + lvFieldCnt; ++i) { comp->lvaTable[i].incRefCnts(weight, comp, state, false); // Don't propagate } } } if (lvIsStructField && propagate) { // Depending on the promotion type, increment the ref count for the parent struct as well. promotionType = comp->lvaGetParentPromotionType(this); LclVarDsc* parentvarDsc = comp->lvaGetDesc(lvParentLcl); assert(!parentvarDsc->lvRegStruct); if (promotionType == Compiler::PROMOTION_TYPE_DEPENDENT) { parentvarDsc->incRefCnts(weight, comp, state, false); // Don't propagate } } #ifdef DEBUG if (comp->verbose) { printf("New refCnts for V%02u: refCnt = %2u, refCntWtd = %s\n", comp->lvaGetLclNum(this), lvRefCnt(state), refCntWtd2str(lvRefCntWtd(state))); } #endif } /***************************************************************************** * * The following returns the mask of all tracked locals * referenced in a statement. */ inline VARSET_VALRET_TP Compiler::lvaStmtLclMask(Statement* stmt) { VARSET_TP lclMask(VarSetOps::MakeEmpty(this)); assert(fgStmtListThreaded); for (GenTree* const tree : stmt->TreeList()) { if (tree->gtOper != GT_LCL_VAR) { continue; } const LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon()); if (!varDsc->lvTracked) { continue; } VarSetOps::UnionD(this, lclMask, VarSetOps::MakeSingleton(this, varDsc->lvVarIndex)); } return lclMask; } /***************************************************************************** Is this a synchronized instance method? If so, we will need to report "this" in the GC information, so that the EE can release the object lock in case of an exception We also need to report "this" and keep it alive for all shared generic code that gets the actual generic context from the "this" pointer and has exception handlers. For example, if List<T>::m() is shared between T = object and T = string, then inside m() an exception handler "catch E<T>" needs to be able to fetch the 'this' pointer to find out what 'T' is in order to tell if we should catch the exception or not. 
*/

inline bool Compiler::lvaKeepAliveAndReportThis()
{
    if (info.compIsStatic || lvaTable[0].TypeGet() != TYP_REF)
    {
        return false;
    }

    const bool genericsContextIsThis = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0;

#ifdef JIT32_GCENCODER

    if (info.compFlags & CORINFO_FLG_SYNCH)
        return true;

    if (genericsContextIsThis)
    {
        // TODO: Check if any of the exception clauses are
        // typed using a generic type. Else, we do not need to report this.
        if (info.compXcptnsCount > 0)
            return true;

        if (opts.compDbgCode)
            return true;

        if (lvaGenericsContextInUse)
        {
            JITDUMP("Reporting this as generic context\n");
            return true;
        }
    }
#else // !JIT32_GCENCODER
    // If the generics context is the this pointer we need to report it if either
    // the VM requires us to keep the generics context alive or it is used in a look-up.
    // We keep it alive in the lookup scenario, even when the VM didn't ask us to,
    // because collectible types need the generics context when gc-ing.
    //
    // Methods that can inspire OSR methods must always report context as live
    //
    if (genericsContextIsThis)
    {
        const bool mustKeep      = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
        const bool hasPatchpoint = doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints();

        if (lvaGenericsContextInUse || mustKeep || hasPatchpoint)
        {
            JITDUMP("Reporting this as generic context: %s\n",
                    mustKeep ? "must keep" : (hasPatchpoint ? "patchpoints" : "referenced"));
            return true;
        }
    }
#endif

    return false;
}

/*****************************************************************************
  Similar to lvaKeepAliveAndReportThis
 */

inline bool Compiler::lvaReportParamTypeArg()
{
    if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
    {
        assert(info.compTypeCtxtArg != -1);

        // If the VM requires us to keep the generics context alive and report it (for example, if any catch
        // clause catches a type that uses a generic parameter of this method) this flag will be set.
        if (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE)
        {
            return true;
        }

        // Otherwise, if an exact type parameter is needed in the body, report the generics context.
        // We do this because collectible types need the generics context when gc-ing.
        if (lvaGenericsContextInUse)
        {
            return true;
        }

        // Methods that have patchpoints always report context as live
        //
        if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints())
        {
            return true;
        }
    }

    // Otherwise, we don't need to report it -- the generics context parameter is unused.
    return false;
}

//*****************************************************************************

inline int Compiler::lvaCachedGenericContextArgOffset()
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);

    return lvaCachedGenericContextArgOffs;
}

//------------------------------------------------------------------------
// lvaFrameAddress: Determine the stack frame offset of the given variable,
// and how to generate an address to that stack frame.
//
// Arguments:
//    varNum         - The variable to inquire about. Positive for user variables
//                     or arguments, negative for spill-temporaries.
//    mustBeFPBased  - [TARGET_ARM only] True if the base register must be FP.
//                     After FINAL_FRAME_LAYOUT, if false, it also requires SP base register.
//    pBaseReg       - [TARGET_ARM only] Out arg. *pBaseReg is set to the base
//                     register to use.
//    addrModeOffset - [TARGET_ARM only] The mode offset within the variable that we need to address.
//                     For example, for a large struct local, and a struct field reference, this will be the offset
//                     of the field. Thus, for V02 + 0x28, if V02 itself is at offset SP + 0x10
//                     then addrModeOffset is what gets added beyond that, here 0x28.
//    isFloatUsage   - [TARGET_ARM only] True if the instruction being generated is a floating
//                     point instruction. This requires using floating-point offset restrictions.
//                     Note that a variable can be non-float, e.g., struct, but accessed as a
//                     float local field.
//    pFPbased       - [non-TARGET_ARM] Out arg. Set *FPbased to true if the
//                     variable is addressed off of FP, false if it's addressed
//                     off of SP.
//
// Return Value:
//    Returns the variable offset from the given base register.
//
inline
#ifdef TARGET_ARM
    int
    Compiler::lvaFrameAddress(
        int varNum, bool mustBeFPBased, regNumber* pBaseReg, int addrModeOffset, bool isFloatUsage)
#else
    int
    Compiler::lvaFrameAddress(int varNum, bool* pFPbased)
#endif
{
    assert(lvaDoneFrameLayout != NO_FRAME_LAYOUT);

    int  varOffset;
    bool FPbased;
    bool fConservative = false;
    if (varNum >= 0)
    {
        LclVarDsc* varDsc = lvaGetDesc(varNum);

        bool isPrespilledArg = false;
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
        isPrespilledArg = varDsc->lvIsParam && compIsProfilerHookNeeded() &&
                          lvaIsPreSpilled(varNum, codeGen->regSet.rsMaskPreSpillRegs(false));
#endif

        // If we have finished with register allocation, and this isn't a stack-based local,
        // check that this has a valid stack location.
        if (lvaDoneFrameLayout > REGALLOC_FRAME_LAYOUT && !varDsc->lvOnFrame)
        {
#ifdef TARGET_AMD64
#ifndef UNIX_AMD64_ABI
            // On amd64, every param has a stack location, except on Unix-like systems.
            assert(varDsc->lvIsParam);
#endif // UNIX_AMD64_ABI
#else  // !TARGET_AMD64
            // For other targets, a stack parameter that is enregistered or prespilled
            // for profiling on ARM will have a stack location.
            assert((varDsc->lvIsParam && !varDsc->lvIsRegArg) || isPrespilledArg);
#endif // !TARGET_AMD64
        }

        FPbased = varDsc->lvFramePointerBased;

#ifdef DEBUG
#if FEATURE_FIXED_OUT_ARGS
        if ((unsigned)varNum == lvaOutgoingArgSpaceVar)
        {
            assert(FPbased == false);
        }
        else
#endif
        {
#if DOUBLE_ALIGN
            assert(FPbased == (isFramePointerUsed() || (genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)));
#else
#ifdef TARGET_X86
            assert(FPbased == isFramePointerUsed());
#endif
#endif
        }
#endif // DEBUG

        varOffset = varDsc->GetStackOffset();
    }
    else // It's a spill-temp
    {
        FPbased = isFramePointerUsed();
        if (lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT)
        {
            TempDsc* tmpDsc = codeGen->regSet.tmpFindNum(varNum);
            // The temp might be in use, since this might be during code generation.
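            // (tmpFindNum searches the free temps by default; the fallback below
            // searches the temps that are currently in use.)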
if (tmpDsc == nullptr) { tmpDsc = codeGen->regSet.tmpFindNum(varNum, RegSet::TEMP_USAGE_USED); } assert(tmpDsc != nullptr); varOffset = tmpDsc->tdTempOffs(); } else { // This value is an estimate until we calculate the // offset after the final frame layout // --------------------------------------------------- // : : // +-------------------------+ base --+ // | LR, ++N for ARM | | frameBaseOffset (= N) // +-------------------------+ | // | R11, ++N for ARM | <---FP | // +-------------------------+ --+ // | compCalleeRegsPushed - N| | lclFrameOffset // +-------------------------+ --+ // | lclVars | | // +-------------------------+ | // | tmp[MAX_SPILL_TEMP] | | // | tmp[1] | | // | tmp[0] | | compLclFrameSize // +-------------------------+ | // | outgoingArgSpaceSize | | // +-------------------------+ --+ // | | <---SP // : : // --------------------------------------------------- fConservative = true; if (!FPbased) { // Worst case stack based offset. CLANG_FORMAT_COMMENT_ANCHOR; #if FEATURE_FIXED_OUT_ARGS int outGoingArgSpaceSize = lvaOutgoingArgSpaceSize; #else int outGoingArgSpaceSize = 0; #endif varOffset = outGoingArgSpaceSize + max(-varNum * TARGET_POINTER_SIZE, (int)lvaGetMaxSpillTempSize()); } else { // Worst case FP based offset. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM varOffset = codeGen->genCallerSPtoInitialSPdelta() - codeGen->genCallerSPtoFPdelta(); #else varOffset = -(codeGen->genTotalFrameSize()); #endif } } } #ifdef TARGET_ARM if (FPbased) { if (mustBeFPBased) { *pBaseReg = REG_FPBASE; } // Change the Frame Pointer (R11)-based addressing to the SP-based addressing when possible because // it generates smaller code on ARM. See frame picture above for the math. else { // If it is the final frame layout phase, we don't have a choice, we should stick // to either FP based or SP based that we decided in the earlier phase. Because // we have already selected the instruction. MinOpts will always reserve R10, so // for MinOpts always use SP-based offsets, using R10 as necessary, for simplicity. int spVarOffset = fConservative ? compLclFrameSize : varOffset + codeGen->genSPtoFPdelta(); int actualSPOffset = spVarOffset + addrModeOffset; int actualFPOffset = varOffset + addrModeOffset; int encodingLimitUpper = isFloatUsage ? 0x3FC : 0xFFF; int encodingLimitLower = isFloatUsage ? -0x3FC : -0xFF; // Use SP-based encoding. During encoding, we'll pick the best encoding for the actual offset we have. if (opts.MinOpts() || (actualSPOffset <= encodingLimitUpper)) { varOffset = spVarOffset; *pBaseReg = compLocallocUsed ? REG_SAVED_LOCALLOC_SP : REG_SPBASE; } // Use Frame Pointer (R11)-based encoding. else if ((encodingLimitLower <= actualFPOffset) && (actualFPOffset <= encodingLimitUpper)) { *pBaseReg = REG_FPBASE; } // Otherwise, use SP-based encoding. This is either (1) a small positive offset using a single movw, // (2) a large offset using movw/movt. In either case, we must have already reserved // the "reserved register", which will get used during encoding. else { varOffset = spVarOffset; *pBaseReg = compLocallocUsed ? 
REG_SAVED_LOCALLOC_SP : REG_SPBASE; } } } else { *pBaseReg = REG_SPBASE; } #else *pFPbased = FPbased; #endif return varOffset; } inline bool Compiler::lvaIsParameter(unsigned varNum) { const LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->lvIsParam; } inline bool Compiler::lvaIsRegArgument(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->lvIsRegArg; } inline bool Compiler::lvaIsOriginalThisArg(unsigned varNum) { assert(varNum < lvaCount); bool isOriginalThisArg = (varNum == info.compThisArg) && (info.compIsStatic == false); #ifdef DEBUG if (isOriginalThisArg) { LclVarDsc* varDsc = lvaGetDesc(varNum); // Should never write to or take the address of the original 'this' arg CLANG_FORMAT_COMMENT_ANCHOR; #ifndef JIT32_GCENCODER // With the general encoder/decoder, when the original 'this' arg is needed as a generics context param, we // copy to a new local, and mark the original as DoNotEnregister, to // ensure that it is stack-allocated. It should not be the case that the original one can be modified -- it // should not be written to, or address-exposed. assert(!varDsc->lvHasILStoreOp && (!varDsc->IsAddressExposed() || ((info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0))); #else assert(!varDsc->lvHasILStoreOp && !varDsc->IsAddressExposed()); #endif } #endif return isOriginalThisArg; } inline bool Compiler::lvaIsOriginalThisReadOnly() { return lvaArg0Var == info.compThisArg; } /***************************************************************************** * * The following is used to detect the cases where the same local variable# * is used both as a long/double value and a 32-bit value and/or both as an * integer/address and a float value. */ /* static */ inline unsigned Compiler::lvaTypeRefMask(var_types type) { const static BYTE lvaTypeRefMasks[] = { #define DEF_TP(tn, nm, jitType, verType, sz, sze, asze, st, al, tf, howUsed) howUsed, #include "typelist.h" #undef DEF_TP }; assert((unsigned)type < sizeof(lvaTypeRefMasks)); assert(lvaTypeRefMasks[type] != 0); return lvaTypeRefMasks[type]; } /***************************************************************************** * * The following is used to detect the cases where the same local variable# * is used both as a long/double value and a 32-bit value and/or both as an * integer/address and a float value. */ inline var_types Compiler::lvaGetActualType(unsigned lclNum) { return genActualType(lvaGetRealType(lclNum)); } inline var_types Compiler::lvaGetRealType(unsigned lclNum) { return lvaTable[lclNum].TypeGet(); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX Importer XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ inline unsigned Compiler::compMapILargNum(unsigned ILargNum) { assert(ILargNum < info.compILargsCount); // Note that this works because if compRetBuffArg/compTypeCtxtArg/lvVarargsHandleArg are not present // they will be BAD_VAR_NUM (MAX_UINT), which is larger than any variable number. if (ILargNum >= info.compRetBuffArg) { ILargNum++; assert(ILargNum < info.compLocalsCount); // compLocals count already adjusted. } if (ILargNum >= (unsigned)info.compTypeCtxtArg) { ILargNum++; assert(ILargNum < info.compLocalsCount); // compLocals count already adjusted. 
} if (ILargNum >= (unsigned)lvaVarargsHandleArg) { ILargNum++; assert(ILargNum < info.compLocalsCount); // compLocals count already adjusted. } assert(ILargNum < info.compArgsCount); return (ILargNum); } //------------------------------------------------------------------------ // Compiler::mangleVarArgsType: Retype float types to their corresponding // : int/long types. // // Notes: // // The mangling of types will only occur for incoming vararg fixed arguments // on windows arm|64 or on armel (softFP). // // NO-OP for all other cases. // inline var_types Compiler::mangleVarArgsType(var_types type) { #if defined(TARGET_ARMARCH) if (opts.compUseSoftFP || (TargetOS::IsWindows && info.compIsVarArgs)) { switch (type) { case TYP_FLOAT: return TYP_INT; case TYP_DOUBLE: return TYP_LONG; default: break; } } #endif // defined(TARGET_ARMARCH) return type; } // For CORECLR there is no vararg on System V systems. inline regNumber Compiler::getCallArgIntRegister(regNumber floatReg) { assert(compFeatureVarArg()); #ifdef TARGET_AMD64 switch (floatReg) { case REG_XMM0: return REG_RCX; case REG_XMM1: return REG_RDX; case REG_XMM2: return REG_R8; case REG_XMM3: return REG_R9; default: unreached(); } #else // !TARGET_AMD64 // How will float args be passed for RyuJIT/x86? NYI("getCallArgIntRegister for RyuJIT/x86"); return REG_NA; #endif // !TARGET_AMD64 } inline regNumber Compiler::getCallArgFloatRegister(regNumber intReg) { assert(compFeatureVarArg()); #ifdef TARGET_AMD64 switch (intReg) { case REG_RCX: return REG_XMM0; case REG_RDX: return REG_XMM1; case REG_R8: return REG_XMM2; case REG_R9: return REG_XMM3; default: unreached(); } #else // !TARGET_AMD64 // How will float args be passed for RyuJIT/x86? NYI("getCallArgFloatRegister for RyuJIT/x86"); return REG_NA; #endif // !TARGET_AMD64 } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX Register Allocator XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ inline bool rpCanAsgOperWithoutReg(GenTree* op, bool lclvar) { var_types type; switch (op->OperGet()) { case GT_CNS_LNG: case GT_CNS_INT: return true; case GT_LCL_VAR: type = genActualType(op->TypeGet()); if (lclvar && ((type == TYP_INT) || (type == TYP_REF) || (type == TYP_BYREF))) { return true; } break; default: break; } return false; } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX FlowGraph XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ inline bool Compiler::compCanEncodePtrArgCntMax() { #ifdef JIT32_GCENCODER // DDB 204533: // The GC encoding for fully interruptible methods does not // support more than 1023 pushed arguments, so we have to // use a partially interruptible GC info/encoding. // return (fgPtrArgCntMax < MAX_PTRARG_OFS); #else // JIT32_GCENCODER return true; #endif } /***************************************************************************** * * Call the given function pointer for all nodes in the tree. 
The 'visitor' * fn should return one of the following values: * * WALK_ABORT stop walking and return immediately * WALK_CONTINUE continue walking * WALK_SKIP_SUBTREES don't walk any subtrees of the node just visited * * computeStack - true if we want to make stack visible to callback function */ inline Compiler::fgWalkResult Compiler::fgWalkTreePre( GenTree** pTree, fgWalkPreFn* visitor, void* callBackData, bool lclVarsOnly, bool computeStack) { fgWalkData walkData; walkData.compiler = this; walkData.wtprVisitorFn = visitor; walkData.pCallbackData = callBackData; walkData.parent = nullptr; walkData.wtprLclsOnly = lclVarsOnly; #ifdef DEBUG walkData.printModified = false; #endif fgWalkResult result; if (lclVarsOnly && computeStack) { GenericTreeWalker<true, true, false, true, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else if (lclVarsOnly) { GenericTreeWalker<false, true, false, true, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else if (computeStack) { GenericTreeWalker<true, true, false, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else { GenericTreeWalker<false, true, false, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } #ifdef DEBUG if (verbose && walkData.printModified) { gtDispTree(*pTree); } #endif return result; } /***************************************************************************** * * Same as above, except the tree walk is performed in a depth-first fashion, * The 'visitor' fn should return one of the following values: * * WALK_ABORT stop walking and return immediately * WALK_CONTINUE continue walking * * computeStack - true if we want to make stack visible to callback function */ inline Compiler::fgWalkResult Compiler::fgWalkTreePost(GenTree** pTree, fgWalkPostFn* visitor, void* callBackData, bool computeStack) { fgWalkData walkData; walkData.compiler = this; walkData.wtpoVisitorFn = visitor; walkData.pCallbackData = callBackData; walkData.parent = nullptr; fgWalkResult result; if (computeStack) { GenericTreeWalker<true, false, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else { GenericTreeWalker<false, false, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } assert(result == WALK_CONTINUE || result == WALK_ABORT); return result; } /***************************************************************************** * * Call the given function pointer for all nodes in the tree. 
The 'visitor' * fn should return one of the following values: * * WALK_ABORT stop walking and return immediately * WALK_CONTINUE continue walking * WALK_SKIP_SUBTREES don't walk any subtrees of the node just visited */ inline Compiler::fgWalkResult Compiler::fgWalkTree(GenTree** pTree, fgWalkPreFn* preVisitor, fgWalkPreFn* postVisitor, void* callBackData) { fgWalkData walkData; walkData.compiler = this; walkData.wtprVisitorFn = preVisitor; walkData.wtpoVisitorFn = postVisitor; walkData.pCallbackData = callBackData; walkData.parent = nullptr; walkData.wtprLclsOnly = false; #ifdef DEBUG walkData.printModified = false; #endif fgWalkResult result; assert(preVisitor || postVisitor); if (preVisitor && postVisitor) { GenericTreeWalker<true, true, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else if (preVisitor) { GenericTreeWalker<true, true, false, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } else { GenericTreeWalker<true, false, true, false, true> walker(&walkData); result = walker.WalkTree(pTree, nullptr); } #ifdef DEBUG if (verbose && walkData.printModified) { gtDispTree(*pTree); } #endif return result; } /***************************************************************************** * * Has this block been added to throw an inlined exception * Returns true if the block was added to throw one of: * range-check exception * argument exception (used by feature SIMD) * argument range-check exception (used by feature SIMD) * divide by zero exception (Not used on X86/X64) * overflow exception */ inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) { if (!fgIsCodeAdded()) { return false; } if (!(block->bbFlags & BBF_INTERNAL) || block->bbJumpKind != BBJ_THROW) { return false; } if (!block->IsLIR() && (block->lastStmt() == nullptr)) { return false; } // Special check blocks will always end in a throw helper call. // GenTree* const call = block->lastNode(); if ((call == nullptr) || (call->gtOper != GT_CALL)) { return false; } if (!((call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_RNGCHKFAIL)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROWDIVZERO)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW_ARGUMENTEXCEPTION)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_THROW_ARGUMENTOUTOFRANGEEXCEPTION)) || (call->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_OVERFLOW)))) { return false; } // We can get to this point for blocks that we didn't create as throw helper blocks // under stress, with implausible flow graph optimizations. So, walk the fgAddCodeList // for the final determination. for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext) { if (block == add->acdDstBlk) { return add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW || add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN; } } // We couldn't find it in the fgAddCodeList return false; } #if !FEATURE_FIXED_OUT_ARGS /***************************************************************************** * * Return the stackLevel of the inserted block that throws exception * (by calling the EE helper). */ inline unsigned Compiler::fgThrowHlpBlkStkLevel(BasicBlock* block) { for (AddCodeDsc* add = fgAddCodeList; add; add = add->acdNext) { if (block == add->acdDstBlk) { // Compute assert cond separately as assert macro cannot have conditional compilation directives. 
bool cond = (add->acdKind == SCK_RNGCHK_FAIL || add->acdKind == SCK_DIV_BY_ZERO || add->acdKind == SCK_OVERFLOW || add->acdKind == SCK_ARG_EXCPN || add->acdKind == SCK_ARG_RNG_EXCPN); assert(cond); // TODO: bbTgtStkDepth is DEBUG-only. // Should we use it regularly and avoid this search. assert(block->bbTgtStkDepth == add->acdStkLvl); return add->acdStkLvl; } } noway_assert(!"fgThrowHlpBlkStkLevel should only be called if fgIsThrowHlpBlk() is true, but we can't find the " "block in the fgAddCodeList list"); /* We couldn't find the basic block: it must not have been a throw helper block */ return 0; } #endif // !FEATURE_FIXED_OUT_ARGS /* Small inline function to change a given block to a throw block. */ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) { JITDUMP("Converting " FMT_BB " to BBJ_THROW\n", block->bbNum); // Ordering of the following operations matters. // First, note if we are looking at the first block of a call always pair. const bool isCallAlwaysPair = block->isBBCallAlwaysPair(); // Scrub this block from the pred lists of any successors fgRemoveBlockAsPred(block); // Update jump kind after the scrub. block->bbJumpKind = BBJ_THROW; // Any block with a throw is rare block->bbSetRunRarely(); // If we've converted a BBJ_CALLFINALLY block to a BBJ_THROW block, // then mark the subsequent BBJ_ALWAYS block as unreferenced. // // Must do this after we update bbJumpKind of block. if (isCallAlwaysPair) { BasicBlock* leaveBlk = block->bbNext; noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); // leaveBlk is now unreachable, so scrub the pred lists. leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; leaveBlk->bbPreds = nullptr; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // This function (fgConvertBBToThrowBB) can be called before the predecessor lists are created (e.g., in // fgMorph). The fgClearFinallyTargetBit() function to update the BBF_FINALLY_TARGET bit depends on these // predecessor lists. If there are no predecessor lists, we immediately clear all BBF_FINALLY_TARGET bits // (to allow subsequent dead code elimination to delete such blocks without asserts), and set a flag to // recompute them later, before they are required. if (fgComputePredsDone) { fgClearFinallyTargetBit(leaveBlk->bbJumpDest); } else { fgClearAllFinallyTargetBits(); fgNeedToAddFinallyTargetBits = true; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } } /***************************************************************************** * * Return true if we've added any new basic blocks. */ inline bool Compiler::fgIsCodeAdded() { return fgAddCodeModf; } /***************************************************************************** Is the offset too big? 
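    That is, is it larger than the maximum offset at which an access off a null
    base address is still guaranteed to fault? (Accesses at bigger offsets require
    an explicit null check.)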
*/ inline bool Compiler::fgIsBigOffset(size_t offset) { return (offset > compMaxUncheckedOffsetForNullObject); } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX TempsInfo XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ /* static */ inline unsigned RegSet::tmpSlot(unsigned size) { noway_assert(size >= sizeof(int)); noway_assert(size <= TEMP_MAX_SIZE); assert((size % sizeof(int)) == 0); assert(size < UINT32_MAX); return size / sizeof(int) - 1; } /***************************************************************************** * * Finish allocating temps - should be called each time after a pass is made * over a function body. */ inline void RegSet::tmpEnd() { #ifdef DEBUG if (m_rsCompiler->verbose && (tmpCount > 0)) { printf("%d tmps used\n", tmpCount); } #endif // DEBUG } /***************************************************************************** * * Shuts down the temp-tracking code. Should be called once per function * compiled. */ inline void RegSet::tmpDone() { #ifdef DEBUG unsigned count; TempDsc* temp; assert(tmpAllFree()); for (temp = tmpListBeg(), count = temp ? 1 : 0; temp; temp = tmpListNxt(temp), count += temp ? 1 : 0) { assert(temp->tdLegalOffset()); } // Make sure that all the temps were released assert(count == tmpCount); assert(tmpGetCount == 0); #endif // DEBUG } #ifdef DEBUG inline bool Compiler::shouldUseVerboseTrees() { return (JitConfig.JitDumpVerboseTrees() == 1); } inline bool Compiler::shouldUseVerboseSsa() { return (JitConfig.JitDumpVerboseSsa() == 1); } //------------------------------------------------------------------------ // shouldDumpASCIITrees: Should we use only ASCII characters for tree dumps? // // Notes: // This is set to default to 1 in clrConfigValues.h inline bool Compiler::shouldDumpASCIITrees() { return (JitConfig.JitDumpASCII() == 1); } /***************************************************************************** * Should we enable JitStress mode? * 0: No stress * !=2: Vary stress. Performance will be slightly/moderately degraded * 2: Check-all stress. Performance will be REALLY horrible */ inline int getJitStressLevel() { return JitConfig.JitStress(); } #endif // DEBUG /*****************************************************************************/ /* Map a register argument number ("RegArgNum") to a register number ("RegNum"). * A RegArgNum is in this range: * [0, MAX_REG_ARG) -- for integer registers * [0, MAX_FLOAT_REG_ARG) -- for floating point registers * Note that RegArgNum's are overlapping for integer and floating-point registers, * while RegNum's are not (for ARM anyway, though for x86, it might be different). 
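 * For example, on Windows x64, integer RegArgNum 0..3 map to RCX, RDX, R8 and R9,
 * while float RegArgNum 0..3 map to XMM0..XMM3; the argument numbers overlap even
 * though the register numbers do not.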
 * If we have a fixed return buffer register and are given its index
 * we return the fixed return buffer register
 */

inline regNumber genMapIntRegArgNumToRegNum(unsigned argNum)
{
    if (hasFixedRetBuffReg() && (argNum == theFixedRetBuffArgNum()))
    {
        return theFixedRetBuffReg();
    }

    assert(argNum < ArrLen(intArgRegs));
    return intArgRegs[argNum];
}

inline regNumber genMapFloatRegArgNumToRegNum(unsigned argNum)
{
#ifndef TARGET_X86
    assert(argNum < ArrLen(fltArgRegs));
    return fltArgRegs[argNum];
#else
    assert(!"no x86 float arg regs\n");
    return REG_NA;
#endif
}

__forceinline regNumber genMapRegArgNumToRegNum(unsigned argNum, var_types type)
{
    if (varTypeUsesFloatArgReg(type))
    {
        return genMapFloatRegArgNumToRegNum(argNum);
    }
    else
    {
        return genMapIntRegArgNumToRegNum(argNum);
    }
}

/*****************************************************************************/
/* Map a register argument number ("RegArgNum") to a register mask of the associated register.
 * Note that for floating-point registers, only the low register for a register pair
 * (for a double on ARM) is returned.
 */

inline regMaskTP genMapIntRegArgNumToRegMask(unsigned argNum)
{
    assert(argNum < ArrLen(intArgMasks));
    return intArgMasks[argNum];
}

inline regMaskTP genMapFloatRegArgNumToRegMask(unsigned argNum)
{
#ifndef TARGET_X86
    assert(argNum < ArrLen(fltArgMasks));
    return fltArgMasks[argNum];
#else
    assert(!"no x86 float arg regs\n");
    return RBM_NONE;
#endif
}

__forceinline regMaskTP genMapArgNumToRegMask(unsigned argNum, var_types type)
{
    regMaskTP result;
    if (varTypeUsesFloatArgReg(type))
    {
        result = genMapFloatRegArgNumToRegMask(argNum);
#ifdef TARGET_ARM
        if (type == TYP_DOUBLE)
        {
            assert((result & RBM_DBL_REGS) != 0);
            result |= (result << 1);
        }
#endif
    }
    else
    {
        result = genMapIntRegArgNumToRegMask(argNum);
    }
    return result;
}

/*****************************************************************************/
/* Map a register number ("RegNum") to a register argument number ("RegArgNum")
 * If we have a fixed return buffer register we return theFixedRetBuffArgNum
 */

inline unsigned genMapIntRegNumToRegArgNum(regNumber regNum)
{
    assert(genRegMask(regNum) & fullIntArgRegMask());

    switch (regNum)
    {
        case REG_ARG_0:
            return 0;
#if MAX_REG_ARG >= 2
        case REG_ARG_1:
            return 1;
#if MAX_REG_ARG >= 3
        case REG_ARG_2:
            return 2;
#if MAX_REG_ARG >= 4
        case REG_ARG_3:
            return 3;
#if MAX_REG_ARG >= 5
        case REG_ARG_4:
            return 4;
#if MAX_REG_ARG >= 6
        case REG_ARG_5:
            return 5;
#if MAX_REG_ARG >= 7
        case REG_ARG_6:
            return 6;
#if MAX_REG_ARG >= 8
        case REG_ARG_7:
            return 7;
#endif
#endif
#endif
#endif
#endif
#endif
#endif
        default:
            // Check for the Arm64 fixed return buffer argument register
            if (hasFixedRetBuffReg() && (regNum == theFixedRetBuffReg()))
            {
                return theFixedRetBuffArgNum();
            }
            else
            {
                assert(!"invalid register arg register");
                return BAD_VAR_NUM;
            }
    }
}

inline unsigned genMapFloatRegNumToRegArgNum(regNumber regNum)
{
    assert(genRegMask(regNum) & RBM_FLTARG_REGS);

#ifdef TARGET_ARM
    return regNum - REG_F0;
#elif defined(TARGET_ARM64)
    return regNum - REG_V0;
#elif defined(UNIX_AMD64_ABI)
    return regNum - REG_FLTARG_0;
#else

#if MAX_FLOAT_REG_ARG >= 1
    switch (regNum)
    {
        case REG_FLTARG_0:
            return 0;
#if MAX_REG_ARG >= 2
        case REG_FLTARG_1:
            return 1;
#if MAX_REG_ARG >= 3
        case REG_FLTARG_2:
            return 2;
#if MAX_REG_ARG >= 4
        case REG_FLTARG_3:
            return 3;
#if MAX_REG_ARG >= 5
        case REG_FLTARG_4:
            return 4;
#endif
#endif
#endif
#endif
        default:
            assert(!"invalid register arg register");
            return BAD_VAR_NUM;
    }
#else
    assert(!"flt reg args not allowed");
    return BAD_VAR_NUM;
#endif

#endif // !arm
}
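// Illustrative sketch (assuming a Windows x64 target; both calls below yield argument
// number 1, since the integer and floating-point RegArgNum spaces overlap):
//
//   unsigned intArgNum = genMapRegNumToRegArgNum(REG_RDX, TYP_INT);     // yields 1
//   unsigned fltArgNum = genMapRegNumToRegArgNum(REG_XMM1, TYP_DOUBLE); // yields 1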
inline unsigned genMapRegNumToRegArgNum(regNumber regNum, var_types type)
{
    if (varTypeUsesFloatArgReg(type))
    {
        return genMapFloatRegNumToRegArgNum(regNum);
    }
    else
    {
        return genMapIntRegNumToRegArgNum(regNum);
    }
}

/*****************************************************************************/
/* Return a register mask with the first 'numRegs' argument registers set.
 */

inline regMaskTP genIntAllRegArgMask(unsigned numRegs)
{
    assert(numRegs <= MAX_REG_ARG);

    regMaskTP result = RBM_NONE;
    for (unsigned i = 0; i < numRegs; i++)
    {
        result |= intArgMasks[i];
    }
    return result;
}

inline regMaskTP genFltAllRegArgMask(unsigned numRegs)
{
#ifndef TARGET_X86
    assert(numRegs <= MAX_FLOAT_REG_ARG);

    regMaskTP result = RBM_NONE;
    for (unsigned i = 0; i < numRegs; i++)
    {
        result |= fltArgMasks[i];
    }
    return result;
#else
    assert(!"no x86 float arg regs\n");
    return RBM_NONE;
#endif
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                          Liveness                                         XX
XX                          Inline functions                                 XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

//------------------------------------------------------------------------
// compUpdateLife: Update the GC masks and register masks, and report changes
//    to the variables' homes, given a set of currently live variables, if
//    anything has changed since "compCurLife".
//
// Arguments:
//    newLife - the set of variables that are alive.
//
// Assumptions:
//    The set of live variables reflects only code that has already been emitted;
//    it must not account for instructions becoming live or dead that have not been
//    emitted yet. This is required by "compChangeLife".
template <bool ForCodeGen>
inline void Compiler::compUpdateLife(VARSET_VALARG_TP newLife)
{
    if (!VarSetOps::Equal(this, compCurLife, newLife))
    {
        compChangeLife<ForCodeGen>(newLife);
    }
#ifdef DEBUG
    else
    {
        if (verbose)
        {
            printf("Liveness not changing: %s ", VarSetOps::ToString(this, compCurLife));
            dumpConvertedVarSet(this, compCurLife);
            printf("\n");
        }
    }
#endif // DEBUG
}

/*****************************************************************************
 *
 *  We stash cookies in basic blocks for the code emitter; this call retrieves
 *  the cookie associated with the given basic block.
*/

inline void* emitCodeGetCookie(BasicBlock* block)
{
    assert(block);
    return block->bbEmitCookie;
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                          Optimizer                                        XX
XX                          Inline functions                                 XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

/*****************************************************************************
 *
 *  The following resets the value assignment table
 *  used only during local assertion prop
 */

inline void Compiler::optAssertionReset(AssertionIndex limit)
{
    PREFAST_ASSUME(optAssertionCount <= optMaxAssertionCount);

    while (optAssertionCount > limit)
    {
        AssertionIndex index        = optAssertionCount;
        AssertionDsc*  curAssertion = optGetAssertion(index);
        optAssertionCount--;
        unsigned lclNum = curAssertion->op1.lcl.lclNum;
        assert(lclNum < lvaCount);
        BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);

        //
        // Find the Copy assertions
        //
        if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
            (curAssertion->op2.kind == O2K_LCLVAR_COPY))
        {
            //
            //  op2.lcl.lclNum no longer depends upon this assertion
            //
            lclNum = curAssertion->op2.lcl.lclNum;
            BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);
        }
    }

    while (optAssertionCount < limit)
    {
        AssertionIndex index        = ++optAssertionCount;
        AssertionDsc*  curAssertion = optGetAssertion(index);
        unsigned       lclNum       = curAssertion->op1.lcl.lclNum;
        BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1);

        //
        // Check for Copy assertions
        //
        if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
            (curAssertion->op2.kind == O2K_LCLVAR_COPY))
        {
            //
            //  op2.lcl.lclNum now depends upon this assertion
            //
            lclNum = curAssertion->op2.lcl.lclNum;
            BitVecOps::AddElemD(apTraits, GetAssertionDep(lclNum), index - 1);
        }
    }
}

/*****************************************************************************
 *
 *  The following removes the i-th entry in the value assignment table
 *  used only during local assertion prop
 */

inline void Compiler::optAssertionRemove(AssertionIndex index)
{
    assert(index > 0);
    assert(index <= optAssertionCount);
    PREFAST_ASSUME(optAssertionCount <= optMaxAssertionCount);

    AssertionDsc* curAssertion = optGetAssertion(index);

    //  Two cases to consider: if (index == optAssertionCount), then the last
    //  entry in the table is to be removed, and that happens automatically when
    //  optAssertionCount is decremented; we can just clear the optAssertionDep bits.
    //  The other case is when index < optAssertionCount; here we overwrite the
    //  index-th entry in the table with the data found at the end of the table.
    //  Since we are reordering the table, the optAssertionDep bits need to be recreated:
    //  calling optAssertionReset(0) and then optAssertionReset(newAssertionCount) will
    //  correctly update the optAssertionDep bits.
    //
    if (index == optAssertionCount)
    {
        unsigned lclNum = curAssertion->op1.lcl.lclNum;
        BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);

        //
        // Check for Copy assertions
        //
        if ((curAssertion->assertionKind == OAK_EQUAL) && (curAssertion->op1.kind == O1K_LCLVAR) &&
            (curAssertion->op2.kind == O2K_LCLVAR_COPY))
        {
            //
            //  op2.lcl.lclNum no longer depends upon this assertion
            //
            lclNum = curAssertion->op2.lcl.lclNum;
            BitVecOps::RemoveElemD(apTraits, GetAssertionDep(lclNum), index - 1);
        }

        optAssertionCount--;
    }
    else
    {
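        // Overwrite the entry being removed with the last table entry, then rebuild
        // the optAssertionDep bit vectors via the two optAssertionReset calls below.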
        AssertionDsc*  lastAssertion     = optGetAssertion(optAssertionCount);
        AssertionIndex newAssertionCount = optAssertionCount - 1;

        optAssertionReset(0); // This makes optAssertionCount equal 0

        memcpy(curAssertion,  // the entry to be removed
               lastAssertion, // last entry in the table
               sizeof(AssertionDsc));

        optAssertionReset(newAssertionCount);
    }
}

inline void Compiler::LoopDsc::AddModifiedField(Compiler* comp, CORINFO_FIELD_HANDLE fldHnd, FieldKindForVN fieldKind)
{
    if (lpFieldsModified == nullptr)
    {
        lpFieldsModified =
            new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::FieldHandleSet(comp->getAllocatorLoopHoist());
    }
    lpFieldsModified->Set(fldHnd, fieldKind, FieldHandleSet::Overwrite);
}

inline void Compiler::LoopDsc::AddModifiedElemType(Compiler* comp, CORINFO_CLASS_HANDLE structHnd)
{
    if (lpArrayElemTypesModified == nullptr)
    {
        lpArrayElemTypesModified =
            new (comp->getAllocatorLoopHoist()) Compiler::LoopDsc::ClassHandleSet(comp->getAllocatorLoopHoist());
    }
    lpArrayElemTypesModified->Set(structHnd, true, ClassHandleSet::Overwrite);
}

inline void Compiler::LoopDsc::VERIFY_lpIterTree() const
{
#ifdef DEBUG
    assert(lpFlags & LPFLG_ITER);

    // iterTree should be "lcl ASG lcl <op> const"

    assert(lpIterTree->OperIs(GT_ASG));

    const GenTree* lhs = lpIterTree->AsOp()->gtOp1;
    const GenTree* rhs = lpIterTree->AsOp()->gtOp2;
    assert(lhs->OperGet() == GT_LCL_VAR);

    switch (rhs->gtOper)
    {
        case GT_ADD:
        case GT_SUB:
        case GT_MUL:
        case GT_RSH:
        case GT_LSH:
            break;
        default:
            assert(!"Unknown operator for loop increment");
    }
    assert(rhs->AsOp()->gtOp1->OperGet() == GT_LCL_VAR);
    assert(rhs->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() == lhs->AsLclVarCommon()->GetLclNum());
    assert(rhs->AsOp()->gtOp2->OperGet() == GT_CNS_INT);
#endif
}

//-----------------------------------------------------------------------------

inline unsigned Compiler::LoopDsc::lpIterVar() const
{
    VERIFY_lpIterTree();
    return lpIterTree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum();
}

//-----------------------------------------------------------------------------

inline int Compiler::LoopDsc::lpIterConst() const
{
    VERIFY_lpIterTree();
    GenTree* rhs = lpIterTree->AsOp()->gtOp2;
    return (int)rhs->AsOp()->gtOp2->AsIntCon()->gtIconVal;
}

//-----------------------------------------------------------------------------

inline genTreeOps Compiler::LoopDsc::lpIterOper() const
{
    VERIFY_lpIterTree();
    GenTree* rhs = lpIterTree->AsOp()->gtOp2;
    return rhs->OperGet();
}

inline var_types Compiler::LoopDsc::lpIterOperType() const
{
    VERIFY_lpIterTree();

    var_types type = lpIterTree->TypeGet();
    assert(genActualType(type) == TYP_INT);

    if ((lpIterTree->gtFlags & GTF_UNSIGNED) && type == TYP_INT)
    {
        type = TYP_UINT;
    }

    return type;
}

inline void Compiler::LoopDsc::VERIFY_lpTestTree() const
{
#ifdef DEBUG
    assert(lpFlags & LPFLG_ITER);
    assert(lpTestTree);

    genTreeOps oper = lpTestTree->OperGet();
    assert(GenTree::OperIsCompare(oper));

    GenTree* iterator = nullptr;
    GenTree* limit    = nullptr;
    if ((lpTestTree->AsOp()->gtOp2->gtOper == GT_LCL_VAR) &&
        (lpTestTree->AsOp()->gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0)
    {
        iterator = lpTestTree->AsOp()->gtOp2;
        limit    = lpTestTree->AsOp()->gtOp1;
    }
    else if ((lpTestTree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) &&
             (lpTestTree->AsOp()->gtOp1->gtFlags & GTF_VAR_ITERATOR) != 0)
    {
        iterator = lpTestTree->AsOp()->gtOp1;
        limit    = lpTestTree->AsOp()->gtOp2;
    }
    else
    {
        // one of the nodes has to be the iterator
        assert(false);
    }

    if (lpFlags & LPFLG_CONST_LIMIT)
    {
        assert(limit->OperIsConst());
    }
    if (lpFlags & LPFLG_VAR_LIMIT)
    {
        assert(limit->OperGet() == GT_LCL_VAR);
    }
    if
(lpFlags & LPFLG_ARRLEN_LIMIT) { assert(limit->OperGet() == GT_ARR_LENGTH); } #endif } //----------------------------------------------------------------------------- inline bool Compiler::LoopDsc::lpIsReversed() const { VERIFY_lpTestTree(); return ((lpTestTree->AsOp()->gtOp2->gtOper == GT_LCL_VAR) && (lpTestTree->AsOp()->gtOp2->gtFlags & GTF_VAR_ITERATOR) != 0); } //----------------------------------------------------------------------------- inline genTreeOps Compiler::LoopDsc::lpTestOper() const { VERIFY_lpTestTree(); genTreeOps op = lpTestTree->OperGet(); return lpIsReversed() ? GenTree::SwapRelop(op) : op; } //----------------------------------------------------------------------------- inline GenTree* Compiler::LoopDsc::lpIterator() const { VERIFY_lpTestTree(); return lpIsReversed() ? lpTestTree->AsOp()->gtOp2 : lpTestTree->AsOp()->gtOp1; } //----------------------------------------------------------------------------- inline GenTree* Compiler::LoopDsc::lpLimit() const { VERIFY_lpTestTree(); return lpIsReversed() ? lpTestTree->AsOp()->gtOp1 : lpTestTree->AsOp()->gtOp2; } //----------------------------------------------------------------------------- inline int Compiler::LoopDsc::lpConstLimit() const { VERIFY_lpTestTree(); assert(lpFlags & LPFLG_CONST_LIMIT); GenTree* limit = lpLimit(); assert(limit->OperIsConst()); return (int)limit->AsIntCon()->gtIconVal; } //----------------------------------------------------------------------------- inline unsigned Compiler::LoopDsc::lpVarLimit() const { VERIFY_lpTestTree(); assert(lpFlags & LPFLG_VAR_LIMIT); GenTree* limit = lpLimit(); assert(limit->OperGet() == GT_LCL_VAR); return limit->AsLclVarCommon()->GetLclNum(); } //----------------------------------------------------------------------------- inline bool Compiler::LoopDsc::lpArrLenLimit(Compiler* comp, ArrIndex* index) const { VERIFY_lpTestTree(); assert(lpFlags & LPFLG_ARRLEN_LIMIT); GenTree* limit = lpLimit(); assert(limit->OperGet() == GT_ARR_LENGTH); // Check if we have a.length or a[i][j].length if (limit->AsArrLen()->ArrRef()->gtOper == GT_LCL_VAR) { index->arrLcl = limit->AsArrLen()->ArrRef()->AsLclVarCommon()->GetLclNum(); index->rank = 0; return true; } // We have a[i].length, extract a[i] pattern. 
    else if (limit->AsArrLen()->ArrRef()->gtOper == GT_COMMA)
    {
        return comp->optReconstructArrIndex(limit->AsArrLen()->ArrRef(), index, BAD_VAR_NUM);
    }
    return false;
}

/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                          EEInterface                                      XX
XX                          Inline functions                                 XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

extern var_types JITtype2varType(CorInfoType type);

#include "ee_il_dll.hpp"

inline CORINFO_METHOD_HANDLE Compiler::eeFindHelper(unsigned helper)
{
    assert(helper < CORINFO_HELP_COUNT);

    /* Helpers are marked by the fact that they are odd numbers
     * force this to be an odd number (will shift it back to extract) */

    return ((CORINFO_METHOD_HANDLE)((((size_t)helper) << 2) + 1));
}

inline CorInfoHelpFunc Compiler::eeGetHelperNum(CORINFO_METHOD_HANDLE method)
{
    // Helpers are marked by the fact that they are odd numbers
    if (!(((size_t)method) & 1))
    {
        return (CORINFO_HELP_UNDEF);
    }
    return ((CorInfoHelpFunc)(((size_t)method) >> 2));
}

//  TODO-Cleanup: Replace calls to IsSharedStaticHelper with new HelperCallProperties
//

inline bool Compiler::IsSharedStaticHelper(GenTree* tree)
{
    if (tree->gtOper != GT_CALL || tree->AsCall()->gtCallType != CT_HELPER)
    {
        return false;
    }

    CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd);

    bool result1 =
        // More helpers being added to IsSharedStaticHelper (that have similar behaviors but are not true
        // SharedStaticHelpers)
        helper == CORINFO_HELP_STRCNS || helper == CORINFO_HELP_BOX ||

        // helpers being added to IsSharedStaticHelper
        helper == CORINFO_HELP_GETSTATICFIELDADDR_TLS || helper == CORINFO_HELP_GETGENERICS_GCSTATIC_BASE ||
        helper == CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE ||
        helper == CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE ||
        helper == CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE ||

        helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE || helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE ||
        helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS ||
        helper == CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS ||
        helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE ||
        helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE ||
        helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR ||
        helper == CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS ||
        helper == CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS ||
#ifdef FEATURE_READYTORUN
        helper == CORINFO_HELP_READYTORUN_STATIC_BASE || helper == CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE ||
#endif
        helper == CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS;
#if 0
    // See above TODO-Cleanup
    bool result2 = s_helperCallProperties.IsPure(helper) && s_helperCallProperties.NonNullReturn(helper);
    assert (result1 == result2);
#endif
    return result1;
}

inline bool Compiler::IsGcSafePoint(GenTreeCall* call)
{
    if (!call->IsFastTailCall())
    {
        if (call->IsUnmanaged() && call->IsSuppressGCTransition())
        {
            // Both indirect and user calls can be unmanaged
            // and have a request to suppress the GC transition, so
            // the check is done prior to the separate handling of
            // indirect and user calls.
return false; } else if (call->gtCallType == CT_INDIRECT) { return true; } else if (call->gtCallType == CT_USER_FUNC) { if ((call->gtCallMoreFlags & GTF_CALL_M_NOGCCHECK) == 0) { return true; } } // otherwise we have a CT_HELPER } return false; } // // Note that we want to have two special FIELD_HANDLES that will both // be considered non-Data Offset handles // // The special values that we use are FLD_GLOBAL_DS and FLD_GLOBAL_FS // inline bool jitStaticFldIsGlobAddr(CORINFO_FIELD_HANDLE fldHnd) { return (fldHnd == FLD_GLOBAL_DS || fldHnd == FLD_GLOBAL_FS); } #if defined(DEBUG) || defined(FEATURE_JIT_METHOD_PERF) || defined(FEATURE_SIMD) || defined(FEATURE_TRACELOGGING) inline bool Compiler::eeIsNativeMethod(CORINFO_METHOD_HANDLE method) { return ((((size_t)method) & 0x2) == 0x2); } inline CORINFO_METHOD_HANDLE Compiler::eeGetMethodHandleForNative(CORINFO_METHOD_HANDLE method) { assert((((size_t)method) & 0x3) == 0x2); return (CORINFO_METHOD_HANDLE)(((size_t)method) & ~0x3); } #endif inline CORINFO_METHOD_HANDLE Compiler::eeMarkNativeTarget(CORINFO_METHOD_HANDLE method) { assert((((size_t)method) & 0x3) == 0); if (method == nullptr) { return method; } else { return (CORINFO_METHOD_HANDLE)(((size_t)method) | 0x2); } } /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX Compiler XX XX Inline functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef DEBUG inline bool Compiler::compStressCompile(compStressArea stressArea, unsigned weightPercentage) { return false; } #endif inline ArenaAllocator* Compiler::compGetArenaAllocator() { return compArenaAllocator; } inline bool Compiler::compIsProfilerHookNeeded() { #ifdef PROFILING_SUPPORTED return compProfilerHookNeeded // IL stubs are excluded by VM and we need to do the same even running // under a complus env hook to generate profiler hooks || (opts.compJitELTHookEnabled && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)); #else // !PROFILING_SUPPORTED return false; #endif // !PROFILING_SUPPORTED } /***************************************************************************** * * Check for the special case where the object is the methods original 'this' pointer. * Note that, the original 'this' pointer is always local var 0 for non-static method, * even if we might have created the copy of 'this' pointer in lvaArg0Var. */ inline bool Compiler::impIsThis(GenTree* obj) { if (compIsForInlining()) { return impInlineInfo->InlinerCompiler->impIsThis(obj); } else { return ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR) && lvaIsOriginalThisArg(obj->AsLclVarCommon()->GetLclNum())); } } /***************************************************************************** * * Check to see if the delegate is created using "LDFTN <TOK>" or not. */ inline bool Compiler::impIsLDFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr) { assert(newobjCodeAddr[0] == CEE_NEWOBJ); return (newobjCodeAddr - delegateCreateStart == 6 && // LDFTN <TOK> takes 6 bytes delegateCreateStart[0] == CEE_PREFIX1 && delegateCreateStart[1] == (CEE_LDFTN & 0xFF)); } /***************************************************************************** * * Check to see if the delegate is created using "DUP LDVIRTFTN <TOK>" or not. 
 */
inline bool Compiler::impIsDUP_LDVIRTFTN_TOKEN(const BYTE* delegateCreateStart, const BYTE* newobjCodeAddr)
{
    assert(newobjCodeAddr[0] == CEE_NEWOBJ);
    return (newobjCodeAddr - delegateCreateStart == 7 && // DUP LDVIRTFTN <TOK> takes 7 bytes
            delegateCreateStart[0] == CEE_DUP && delegateCreateStart[1] == CEE_PREFIX1 &&
            delegateCreateStart[2] == (CEE_LDVIRTFTN & 0xFF));
}

/*****************************************************************************
 *
 * Returns true if the compiler instance is created for import only (verification).
 */

inline bool Compiler::compIsForImportOnly()
{
    return opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY);
}

/*****************************************************************************
 *
 * Returns true if the compiler instance is created for inlining.
 */

inline bool Compiler::compIsForInlining() const
{
    return (impInlineInfo != nullptr);
}

/*****************************************************************************
 *
 * Check the inline result field in the compiler to see if inlining failed or not.
 */

inline bool Compiler::compDonotInline()
{
    if (compIsForInlining())
    {
        assert(compInlineResult != nullptr);
        return compInlineResult->IsFailure();
    }
    else
    {
        return false;
    }
}

inline bool Compiler::impIsPrimitive(CorInfoType jitType)
{
    return ((CORINFO_TYPE_BOOL <= jitType && jitType <= CORINFO_TYPE_DOUBLE) || jitType == CORINFO_TYPE_PTR);
}

/*****************************************************************************
 *
 * Get the promotion type of a struct local.
 */

inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(const LclVarDsc* varDsc)
{
    assert(!varDsc->lvPromoted || varTypeIsPromotable(varDsc) || varDsc->lvUnusedStruct);

    if (!varDsc->lvPromoted)
    {
        // no struct promotion for this LclVar
        return PROMOTION_TYPE_NONE;
    }

    if (varDsc->lvDoNotEnregister)
    {
        // The struct is not enregistered
        return PROMOTION_TYPE_DEPENDENT;
    }

    if (!varDsc->lvIsParam)
    {
        // The struct is a register candidate
        return PROMOTION_TYPE_INDEPENDENT;
    }

    // Has struct promotion for arguments been disabled using COMPlus_JitNoStructPromotion=2
    if (fgNoStructParamPromotion)
    {
        // The struct parameter is not enregistered
        return PROMOTION_TYPE_DEPENDENT;
    }

    // We have a parameter that could be enregistered
#if defined(TARGET_ARM)
    // TODO-Cleanup: return INDEPENDENT for arm32.
    return PROMOTION_TYPE_DEPENDENT;
#else  // !TARGET_ARM
    return PROMOTION_TYPE_INDEPENDENT;
#endif // !TARGET_ARM
}

/*****************************************************************************
 *
 * Get the promotion type of a struct local.
 */

inline Compiler::lvaPromotionType Compiler::lvaGetPromotionType(unsigned varNum)
{
    return lvaGetPromotionType(lvaGetDesc(varNum));
}

/*****************************************************************************
 *
 * Given a field local, get the promotion type of its parent struct local.
 */

inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(const LclVarDsc* varDsc)
{
    assert(varDsc->lvIsStructField);
    lvaPromotionType promotionType = lvaGetPromotionType(varDsc->lvParentLcl);
    assert(promotionType != PROMOTION_TYPE_NONE);
    return promotionType;
}

/*****************************************************************************
 *
 * Given a field local, get the promotion type of its parent struct local.
*/ inline Compiler::lvaPromotionType Compiler::lvaGetParentPromotionType(unsigned varNum) { return lvaGetParentPromotionType(lvaGetDesc(varNum)); } /***************************************************************************** * * Return true if the local is a field local of a promoted struct of type PROMOTION_TYPE_DEPENDENT. * Return false otherwise. */ inline bool Compiler::lvaIsFieldOfDependentlyPromotedStruct(const LclVarDsc* varDsc) { if (!varDsc->lvIsStructField) { return false; } lvaPromotionType promotionType = lvaGetParentPromotionType(varDsc); if (promotionType == PROMOTION_TYPE_DEPENDENT) { return true; } assert(promotionType == PROMOTION_TYPE_INDEPENDENT); return false; } //------------------------------------------------------------------------ // lvaIsGCTracked: Determine whether this var should be reported // as tracked for GC purposes. // // Arguments: // varDsc - the LclVarDsc for the var in question. // // Return Value: // Returns true if the variable should be reported as tracked in the GC info. // // Notes: // This never returns true for struct variables, even if they are tracked. // This is because struct variables are never tracked as a whole for GC purposes. // It is up to the caller to ensure that the fields of struct variables are // correctly tracked. // On Amd64, we never GC-track fields of dependently promoted structs, even // though they may be tracked for optimization purposes. // It seems that on x86 and arm, we simply don't track these // fields, though I have not verified that. I attempted to make these GC-tracked, // but there was too much logic that depends on these being untracked, so changing // this would require non-trivial effort. inline bool Compiler::lvaIsGCTracked(const LclVarDsc* varDsc) { if (varDsc->lvTracked && (varDsc->lvType == TYP_REF || varDsc->lvType == TYP_BYREF)) { // Stack parameters are always untracked w.r.t. GC reportings const bool isStackParam = varDsc->lvIsParam && !varDsc->lvIsRegArg; #ifdef TARGET_AMD64 return !isStackParam && !lvaIsFieldOfDependentlyPromotedStruct(varDsc); #else // !TARGET_AMD64 return !isStackParam; #endif // !TARGET_AMD64 } else { return false; } } /*****************************************************************************/ #if MEASURE_CLRAPI_CALLS inline void Compiler::CLRApiCallEnter(unsigned apix) { if (pCompJitTimer != nullptr) { pCompJitTimer->CLRApiCallEnter(apix); } } inline void Compiler::CLRApiCallLeave(unsigned apix) { if (pCompJitTimer != nullptr) { pCompJitTimer->CLRApiCallLeave(apix); } } inline void Compiler::CLR_API_Enter(API_ICorJitInfo_Names ename) { CLRApiCallEnter(ename); } inline void Compiler::CLR_API_Leave(API_ICorJitInfo_Names ename) { CLRApiCallLeave(ename); } #endif // MEASURE_CLRAPI_CALLS //------------------------------------------------------------------------------ // fgVarIsNeverZeroInitializedInProlog : Check whether the variable is never zero initialized in the prolog. 
// // Arguments: // varNum - local variable number // // Returns: // true if this is a special variable that is never zero initialized in the prolog; // false otherwise // bool Compiler::fgVarIsNeverZeroInitializedInProlog(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); bool result = varDsc->lvIsParam || lvaIsOSRLocal(varNum) || (varNum == lvaGSSecurityCookie) || (varNum == lvaInlinedPInvokeFrameVar) || (varNum == lvaStubArgumentVar) || (varNum == lvaRetAddrVar); #if FEATURE_FIXED_OUT_ARGS result = result || (varNum == lvaPInvokeFrameRegSaveVar) || (varNum == lvaOutgoingArgSpaceVar); #endif #if defined(FEATURE_EH_FUNCLETS) result = result || (varNum == lvaPSPSym); #endif return result; } //------------------------------------------------------------------------------ // fgVarNeedsExplicitZeroInit : Check whether the variable needs an explicit zero initialization. // // Arguments: // varNum - local var number // bbInALoop - true if the basic block may be in a loop // bbIsReturn - true if the basic block always returns // // Returns: // true if the var needs explicit zero-initialization in this basic block; // false otherwise // // Notes: // If the variable is not being initialized in a loop, we can avoid explicit zero initialization if // - the variable is a gc pointer, or // - the variable is a struct with gc pointer fields and either all fields are gc pointer fields // or the struct is big enough to guarantee block initialization, or // - compInitMem is set and the variable has a long lifetime or has gc fields. // In these cases we will insert zero-initialization in the prolog if necessary. bool Compiler::fgVarNeedsExplicitZeroInit(unsigned varNum, bool bbInALoop, bool bbIsReturn) { LclVarDsc* varDsc = lvaGetDesc(varNum); if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { // Fields of dependently promoted structs may only be initialized in the prolog when the whole // struct is initialized in the prolog. return fgVarNeedsExplicitZeroInit(varDsc->lvParentLcl, bbInALoop, bbIsReturn); } if (bbInALoop && !bbIsReturn) { return true; } if (fgVarIsNeverZeroInitializedInProlog(varNum)) { return true; } if (varTypeIsGC(varDsc->lvType)) { return false; } if ((varDsc->lvType == TYP_STRUCT) && varDsc->HasGCPtr()) { ClassLayout* layout = varDsc->GetLayout(); if (layout->GetSlotCount() == layout->GetGCPtrCount()) { return false; } // Below conditions guarantee block initialization, which will initialize // all struct fields. If the logic for block initialization in CodeGen::genCheckUseBlockInit() // changes, these conditions need to be updated. 
#ifdef TARGET_64BIT #if defined(TARGET_AMD64) // We can clear using aligned SIMD so the threshold is lower, // and clears in order which is better for auto-prefetching if (roundUp(varDsc->lvSize(), TARGET_POINTER_SIZE) / sizeof(int) > 4) #else // !defined(TARGET_AMD64) if (roundUp(varDsc->lvSize(), TARGET_POINTER_SIZE) / sizeof(int) > 8) #endif #else if (roundUp(varDsc->lvSize(), TARGET_POINTER_SIZE) / sizeof(int) > 4) #endif { return false; } } return !info.compInitMem || (varDsc->lvIsTemp && !varDsc->HasGCPtr()); } /*****************************************************************************/ ValueNum Compiler::GetUseAsgDefVNOrTreeVN(GenTree* op) { if (op->gtFlags & GTF_VAR_USEASG) { unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); unsigned ssaNum = GetSsaNumForLocalVarDef(op); return lvaTable[lclNum].GetPerSsaData(ssaNum)->m_vnPair.GetConservative(); } else { return op->gtVNPair.GetConservative(); } } /*****************************************************************************/ unsigned Compiler::GetSsaNumForLocalVarDef(GenTree* lcl) { // Address-taken variables don't have SSA numbers. if (!lvaInSsa(lcl->AsLclVarCommon()->GetLclNum())) { return SsaConfig::RESERVED_SSA_NUM; } if (lcl->gtFlags & GTF_VAR_USEASG) { // It's partial definition of a struct. "lcl" is both used and defined here; // we've chosen in this case to annotate "lcl" with the SSA number (and VN) of the use, // and to store the SSA number of the def in a side table. unsigned ssaNum; // In case of a remorph (fgMorph) in CSE/AssertionProp after SSA phase, there // wouldn't be an entry for the USEASG portion of the indir addr, return // reserved. if (!GetOpAsgnVarDefSsaNums()->Lookup(lcl, &ssaNum)) { return SsaConfig::RESERVED_SSA_NUM; } return ssaNum; } else { return lcl->AsLclVarCommon()->GetSsaNum(); } } inline bool Compiler::PreciseRefCountsRequired() { return opts.OptimizationEnabled(); } template <typename TVisitor> void GenTree::VisitOperands(TVisitor visitor) { switch (OperGet()) { // Leaf nodes case GT_LCL_VAR: case GT_LCL_FLD: case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: case GT_CATCH_ARG: case GT_LABEL: case GT_FTN_ADDR: case GT_RET_EXPR: case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_MEMORYBARRIER: case GT_JMP: case GT_JCC: case GT_SETCC: case GT_NO_OP: case GT_START_NONGC: case GT_START_PREEMPTGC: case GT_PROF_HOOK: #if !defined(FEATURE_EH_FUNCLETS) case GT_END_LFIN: #endif // !FEATURE_EH_FUNCLETS case GT_PHI_ARG: case GT_JMPTABLE: case GT_CLS_VAR: case GT_CLS_VAR_ADDR: case GT_ARGPLACE: case GT_PHYSREG: case GT_EMITNOP: case GT_PINVOKE_PROLOG: case GT_PINVOKE_EPILOG: case GT_IL_OFFSET: return; // Unary operators with an optional operand case GT_NOP: case GT_FIELD: case GT_RETURN: case GT_RETFILT: if (this->AsUnOp()->gtOp1 == nullptr) { return; } FALLTHROUGH; // Standard unary operators case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: case GT_NOT: case GT_NEG: case GT_BSWAP: case GT_BSWAP16: case GT_COPY: case GT_RELOAD: case GT_ARR_LENGTH: case GT_CAST: case GT_BITCAST: case GT_CKFINITE: case GT_LCLHEAP: case GT_ADDR: case GT_IND: case GT_OBJ: case GT_BLK: case GT_BOX: case GT_ALLOCOBJ: case GT_INIT_VAL: case GT_RUNTIMELOOKUP: case GT_JTRUE: case GT_SWITCH: case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: case GT_PUTARG_TYPE: #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT case GT_RETURNTRAP: case GT_KEEPALIVE: case GT_INC_SATURATE: visitor(this->AsUnOp()->gtOp1); return; // Variadic nodes #if defined(FEATURE_SIMD) || 
defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif for (GenTree* operand : this->AsMultiOp()->Operands()) { if (visitor(operand) == VisitResult::Abort) { break; } } return; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) // Special nodes case GT_PHI: for (GenTreePhi::Use& use : AsPhi()->Uses()) { if (visitor(use.GetNode()) == VisitResult::Abort) { break; } } return; case GT_FIELD_LIST: for (GenTreeFieldList::Use& field : AsFieldList()->Uses()) { if (visitor(field.GetNode()) == VisitResult::Abort) { break; } } return; case GT_CMPXCHG: { GenTreeCmpXchg* const cmpXchg = this->AsCmpXchg(); if (visitor(cmpXchg->gtOpLocation) == VisitResult::Abort) { return; } if (visitor(cmpXchg->gtOpValue) == VisitResult::Abort) { return; } visitor(cmpXchg->gtOpComparand); return; } case GT_ARR_ELEM: { GenTreeArrElem* const arrElem = this->AsArrElem(); if (visitor(arrElem->gtArrObj) == VisitResult::Abort) { return; } for (unsigned i = 0; i < arrElem->gtArrRank; i++) { if (visitor(arrElem->gtArrInds[i]) == VisitResult::Abort) { return; } } return; } case GT_ARR_OFFSET: { GenTreeArrOffs* const arrOffs = this->AsArrOffs(); if (visitor(arrOffs->gtOffset) == VisitResult::Abort) { return; } if (visitor(arrOffs->gtIndex) == VisitResult::Abort) { return; } visitor(arrOffs->gtArrObj); return; } case GT_STORE_DYN_BLK: { GenTreeStoreDynBlk* const dynBlock = this->AsStoreDynBlk(); if (visitor(dynBlock->gtOp1) == VisitResult::Abort) { return; } if (visitor(dynBlock->gtOp2) == VisitResult::Abort) { return; } visitor(dynBlock->gtDynamicSize); return; } case GT_CALL: { GenTreeCall* const call = this->AsCall(); if ((call->gtCallThisArg != nullptr) && (visitor(call->gtCallThisArg->GetNode()) == VisitResult::Abort)) { return; } for (GenTreeCall::Use& use : call->Args()) { if (visitor(use.GetNode()) == VisitResult::Abort) { return; } } for (GenTreeCall::Use& use : call->LateArgs()) { if (visitor(use.GetNode()) == VisitResult::Abort) { return; } } if (call->gtCallType == CT_INDIRECT) { if ((call->gtCallCookie != nullptr) && (visitor(call->gtCallCookie) == VisitResult::Abort)) { return; } if ((call->gtCallAddr != nullptr) && (visitor(call->gtCallAddr) == VisitResult::Abort)) { return; } } if ((call->gtControlExpr != nullptr)) { visitor(call->gtControlExpr); } return; } // Binary nodes default: assert(this->OperIsBinary()); VisitBinOpOperands<TVisitor>(visitor); return; } } template <typename TVisitor> void GenTree::VisitBinOpOperands(TVisitor visitor) { assert(this->OperIsBinary()); GenTreeOp* const op = this->AsOp(); GenTree* const op1 = op->gtOp1; if ((op1 != nullptr) && (visitor(op1) == VisitResult::Abort)) { return; } GenTree* const op2 = op->gtOp2; if (op2 != nullptr) { visitor(op2); } } /***************************************************************************** * operator new * * Note that compiler's allocator is an arena allocator that returns memory that is * not zero-initialized and can contain data from a prior allocation lifetime. 
*/ inline void* __cdecl operator new(size_t sz, Compiler* compiler, CompMemKind cmk) { return compiler->getAllocator(cmk).allocate<char>(sz); } inline void* __cdecl operator new[](size_t sz, Compiler* compiler, CompMemKind cmk) { return compiler->getAllocator(cmk).allocate<char>(sz); } inline void* __cdecl operator new(size_t sz, void* p, const jitstd::placement_t& /* syntax_difference */) { return p; } /*****************************************************************************/ #ifdef DEBUG inline void printRegMask(regMaskTP mask) { printf(REG_MASK_ALL_FMT, mask); } inline char* regMaskToString(regMaskTP mask, Compiler* context) { const size_t cchRegMask = 24; char* regmask = new (context, CMK_Unknown) char[cchRegMask]; sprintf_s(regmask, cchRegMask, REG_MASK_ALL_FMT, mask); return regmask; } inline void printRegMaskInt(regMaskTP mask) { printf(REG_MASK_INT_FMT, (mask & RBM_ALLINT)); } inline char* regMaskIntToString(regMaskTP mask, Compiler* context) { const size_t cchRegMask = 24; char* regmask = new (context, CMK_Unknown) char[cchRegMask]; sprintf_s(regmask, cchRegMask, REG_MASK_INT_FMT, (mask & RBM_ALLINT)); return regmask; } #endif // DEBUG inline static bool StructHasOverlappingFields(DWORD attribs) { return ((attribs & CORINFO_FLG_OVERLAPPING_FIELDS) != 0); } inline static bool StructHasCustomLayout(DWORD attribs) { return ((attribs & CORINFO_FLG_CUSTOMLAYOUT) != 0); } inline static bool StructHasDontDigFieldsFlagSet(DWORD attribs) { return ((attribs & CORINFO_FLG_DONT_DIG_FIELDS) != 0); } //------------------------------------------------------------------------------ // DEBUG_DESTROY_NODE: sets value of tree to garbage to catch extra references // // Arguments: // tree: This node should not be referenced by anyone now // inline void DEBUG_DESTROY_NODE(GenTree* tree) { #ifdef DEBUG // printf("DEBUG_DESTROY_NODE for [0x%08x]\n", tree); // Save gtOper in case we want to find out what this node was tree->gtOperSave = tree->gtOper; tree->gtType = TYP_UNDEF; tree->gtFlags |= ~GTF_NODE_MASK; if (tree->OperIsSimple()) { tree->AsOp()->gtOp1 = tree->AsOp()->gtOp2 = nullptr; } // Must do this last, because the "AsOp()" check above will fail otherwise. // Don't call SetOper, because GT_COUNT is not a valid value tree->gtOper = GT_COUNT; #endif } //------------------------------------------------------------------------------ // DEBUG_DESTROY_NODE: sets value of trees to garbage to catch extra references // // Arguments: // tree, ...rest: These nodes should not be referenced by anyone now // template <typename... T> void DEBUG_DESTROY_NODE(GenTree* tree, T... rest) { DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(rest...); } //------------------------------------------------------------------------------ // lvRefCnt: access reference count for this local var // // Arguments: // state: the requestor's expected ref count state; defaults to RCS_NORMAL // // Return Value: // Ref count for the local. 
inline unsigned short LclVarDsc::lvRefCnt(RefCountState state) const
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    if (lvImplicitlyReferenced && (m_lvRefCnt == 0))
    {
        return 1;
    }

    return m_lvRefCnt;
}

//------------------------------------------------------------------------------
// incLvRefCnt: increment reference count for this local var
//
// Arguments:
//    delta: the amount of the increment
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    It is currently the caller's responsibility to ensure this increment
//    will not cause overflow.

inline void LclVarDsc::incLvRefCnt(unsigned short delta, RefCountState state)
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    unsigned short oldRefCnt = m_lvRefCnt;
    m_lvRefCnt += delta;
    assert(m_lvRefCnt >= oldRefCnt);
}

//------------------------------------------------------------------------------
// setLvRefCnt: set the reference count for this local var
//
// Arguments:
//    newValue: the desired new reference count
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    Generally after calling v->setLvRefCnt(Y), v->lvRefCnt() == Y.
//    However this may not be true when v->lvImplicitlyReferenced == 1.

inline void LclVarDsc::setLvRefCnt(unsigned short newValue, RefCountState state)
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    m_lvRefCnt = newValue;
}

//------------------------------------------------------------------------------
// lvRefCntWtd: access weighted reference count for this local var
//
// Arguments:
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Return Value:
//    Weighted ref count for the local.

inline weight_t LclVarDsc::lvRefCntWtd(RefCountState state) const
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    if (lvImplicitlyReferenced && (m_lvRefCntWtd == 0))
    {
        return BB_UNITY_WEIGHT;
    }

    return m_lvRefCntWtd;
}

//------------------------------------------------------------------------------
// incLvRefCntWtd: increment weighted reference count for this local var
//
// Arguments:
//    delta: the amount of the increment
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    It is currently the caller's responsibility to ensure this increment
//    will not cause overflow.

inline void LclVarDsc::incLvRefCntWtd(weight_t delta, RefCountState state)
{

#if defined(DEBUG)
    assert(state != RCS_INVALID);
    Compiler* compiler = JitTls::GetCompiler();
    assert(compiler->lvaRefCountState == state);
#endif

    weight_t oldRefCntWtd = m_lvRefCntWtd;
    m_lvRefCntWtd += delta;
    assert(m_lvRefCntWtd >= oldRefCntWtd);
}

//------------------------------------------------------------------------------
// setLvRefCntWtd: set the weighted reference count for this local var
//
// Arguments:
//    newValue: the desired new weighted reference count
//    state: the requestor's expected ref count state; defaults to RCS_NORMAL
//
// Notes:
//    Generally after calling v->setLvRefCntWtd(Y), v->lvRefCntWtd() == Y.
//    However this may not be true when v->lvImplicitlyReferenced == 1.
inline void LclVarDsc::setLvRefCntWtd(weight_t newValue, RefCountState state) { #if defined(DEBUG) assert(state != RCS_INVALID); Compiler* compiler = JitTls::GetCompiler(); assert(compiler->lvaRefCountState == state); #endif m_lvRefCntWtd = newValue; } //------------------------------------------------------------------------------ // compCanHavePatchpoints: return true if patchpoints are supported in this // method. // // Arguments: // reason - [out, optional] reason why patchpoints are not supported // // Returns: // True if patchpoints are supported in this method. // inline bool Compiler::compCanHavePatchpoints(const char** reason) { const char* whyNot = nullptr; #ifdef FEATURE_ON_STACK_REPLACEMENT if (compLocallocSeen) { whyNot = "OSR can't handle localloc"; } else if (compHasBackwardJumpInHandler) { whyNot = "OSR can't handle loop in handler"; } else if (opts.IsReversePInvoke()) { whyNot = "OSR can't handle reverse pinvoke"; } #else whyNot = "OSR feature not defined in build"; #endif if (reason != nullptr) { *reason = whyNot; } return whyNot == nullptr; } /*****************************************************************************/ #endif //_COMPILER_HPP_ /*****************************************************************************/
1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
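The first rule above is easier to see with a concrete model of the ARM32 argument split. The sketch below is illustrative only — it is not the PR's code, and every name in it (ArgDesc, isSplitStructArg, canFastTailCall) is invented here; it also simplifies the AAPCS by assuming a flat 16-byte r0-r3 budget and ignoring alignment, floating-point registers, and HFAs.

#include <cstdio>
#include <vector>

struct ArgDesc
{
    int  sizeInBytes; // rounded size of the argument
    bool isStruct;    // passed by value as a struct
};

// ARM32 passes the first 16 bytes of core-register arguments in r0-r3;
// anything beyond that goes to the stack. A struct that straddles the
// boundary is "split": part in registers, part on the stack.
static bool isSplitStructArg(int firstRegByte, const ArgDesc& arg)
{
    const int regBytes = 16; // r0..r3
    int       lastByte = firstRegByte + arg.sizeInBytes;
    return arg.isStruct && (firstRegByte < regBytes) && (lastByte > regBytes);
}

// The description's first rule: no fast tail call when any callee argument
// is a split struct, since re-packing it over the live incoming frame is unsafe.
static bool canFastTailCall(const std::vector<ArgDesc>& calleeArgs)
{
    int offset = 0;
    for (const ArgDesc& arg : calleeArgs)
    {
        if (isSplitStructArg(offset, arg))
        {
            return false;
        }
        offset += arg.sizeInBytes;
    }
    return true;
}

int main()
{
    // int, int, 12-byte struct: the struct occupies bytes 8..20, straddling
    // the r3/stack boundary — the "split struct argument" case.
    std::vector<ArgDesc> args = {{4, false}, {4, false}, {12, true}};
    std::printf("fast tail call ok: %s\n", canFastTailCall(args) ? "yes" : "no");
    return 0;
}

Under this simplified model the example prints "no": the 12-byte struct begins in r2 and spills onto the stack, which is exactly the shape the PR's first condition excludes from fast tail calls.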
./src/coreclr/jit/importer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // helper function that will tell us if the IL instruction at the addr passed // by param consumes an address at the top of the stack. We use it to save // us lvAddrTaken bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out as if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitivelike struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isnt a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and is easy to factor // out if we need to do so. 
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peep at n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * have to all be cloneable/spilled values. 
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesnt mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel). 
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignment to (unaliased) locals don't count as a side-effect as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too. 
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD)) { forceNormalization = true; } } #ifdef DEBUG if (verbose) { printf("Calling impNormStructVal on:\n"); gtDispTree(temp); } #endif temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization); #ifdef DEBUG if (verbose) { printf("resulting tree:\n"); gtDispTree(temp); } #endif } /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */ argList = gtPrependNewCallArg(temp, argList); } if (sig != nullptr) { if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); CORINFO_CLASS_HANDLE classHnd; CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types jitSigType = JITtype2varType(corType); if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet())) { BADCODE("the call argument has a type that can't be implicitly converted to the signature type"); } // insert implied casts (from float to double or double to float) if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); } else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE)) { arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT)); } // insert any widening or narrowing casts for backwards compatibility arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && corType != CORINFO_TYPE_VAR) { CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); if (argRealClass != nullptr) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); } } const var_types nodeArgType = arg->GetNode()->TypeGet(); if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { assert(!varTypeIsStruct(nodeArgType)); // Some ABI require precise size information for call arguments less than target pointer size, // for example arm64 OSX. Create a special node to keep this information until morph // consumes it into `fgArgInfo`. 
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
*/ GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // ilOffset - il offset for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. 
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
                    GenTreeCall::Use* lastArg = srcCall->gtCallArgs;
                    if (lastArg == nullptr)
                    {
                        srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
                    }
                    else
                    {
                        for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext())
                            ;
                        gtInsertNewCallArgAfter(destAddr, lastArg);
                    }
#else
                    // insert the return value buffer into the argument list as first byref parameter
                    srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
#endif
                }
            }
            else
#endif // !defined(TARGET_ARM)
            {
                // insert the return value buffer into the argument list as first byref parameter
                srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs);
            }

            // now returns void, not a struct
            src->gtType = TYP_VOID;

            // return the morphed call node
            return src;
        }
        else
        {
            // Case of call returning a struct in one or more registers.

            var_types returnType = (var_types)srcCall->gtReturnType;

            // First we try to change this to "LclVar/LclFld = call"
            //
            if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR))
            {
                // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
                // That is, the IR will be of the form lclVar = call for multi-reg return
                //
                GenTreeLclVar* lcl    = destAddr->AsOp()->gtOp1->AsLclVar();
                unsigned       lclNum = lcl->GetLclNum();
                LclVarDsc*     varDsc = lvaGetDesc(lclNum);
                if (src->AsCall()->HasMultiRegRetVal())
                {
                    // Mark the struct LclVar as used in a MultiReg return context
                    //  which currently makes it non promotable.
                    // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                    // handle multireg returns.
                    lcl->gtFlags |= GTF_DONT_CSE;
                    varDsc->lvIsMultiRegRet = true;
                }

                dest = lcl;

#if defined(TARGET_ARM)
                // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
                // but that method has not been updated to include ARM.
                impMarkLclDstNotPromotable(lclNum, src, structHnd);
                lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
                // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
                assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs.");

                // Make the struct non promotable. The eightbytes could contain multiple fields.
                // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                // handle multireg returns.
                // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
                // non-multireg returns.
                lcl->gtFlags |= GTF_DONT_CSE;
                varDsc->lvIsMultiRegRet = true;
#endif
            }
            else // we don't have a GT_ADDR of a GT_LCL_VAR
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                asgType   = returnType;
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->gtOper == GT_RET_EXPR)
    {
        GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall();
        noway_assert(call->gtOper == GT_CALL);

        if (call->HasRetBufArg())
        {
            // insert the return value buffer into the argument list as first byref parameter
            call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs);

            // now returns void, not a struct
            src->gtType  = TYP_VOID;
            call->gtType = TYP_VOID;

            // We already have appended the write to 'dest' GT_CALL's args
            // So now we just return an empty node (pruning the GT_RET_EXPR)
            return src;
        }
        else
        {
            // Case of inline method returning a struct in one or more registers.
            // We won't need a return buffer
            asgType = src->gtType;

            if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR))
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
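                // GTF_IND_TGTANYWHERE records that the store target may not be in the
                // GC heap (e.g. it could be on the stack), so a checked write barrier
                // is used when one is required.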
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
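                // This preserves evaluation order: op1's side effects end up ahead of
                // whatever was appended while importing op2.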
                beforeStmt = oldLastStmt->GetNextStmt();
            }

            impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt);
            structVal->AsOp()->gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}

//------------------------------------------------------------------------
// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
//    structHnd        - The class handle for the struct type of interest.
//    pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                       type, set to the SIMD base JIT type
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known
//    call structSizeMightRepresentSIMDType to determine if this api needs to be called.

var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType)
{
    assert(structHnd != NO_CLASS_HANDLE);

    var_types structType = TYP_STRUCT;

#ifdef FEATURE_SIMD
    if (supportSIMDTypes())
    {
        const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);

        // Don't bother if the struct contains GC references or byrefs, it can't be a SIMD type.
        if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0)
        {
            unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

            if (structSizeMightRepresentSIMDType(originalSize))
            {
                unsigned int sizeBytes;
                CorInfoType  simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
                if (simdBaseJitType != CORINFO_TYPE_UNDEF)
                {
                    assert(sizeBytes == originalSize);
                    structType = getSIMDTypeForSize(sizeBytes);
                    if (pSimdBaseJitType != nullptr)
                    {
                        *pSimdBaseJitType = simdBaseJitType;
                    }
                    // Also indicate that we use floating point registers.
                    compFloatingPointUsed = true;
                }
            }
        }
    }
#endif // FEATURE_SIMD

    return structType;
}

//------------------------------------------------------------------------
//  Compiler::impNormStructVal: Normalize a struct value
//
//  Arguments:
//     structVal          - the node we are going to normalize
//     structHnd          - the class handle for the node
//     curLevel           - the current stack level
//     forceNormalization - Force the creation of an OBJ node (default is false).
//
// Notes:
//     Given struct value 'structVal', make sure it is 'canonical', that is
//     it is either:
//     - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8)
//     - an OBJ or a MKREFANY node, or
//     - a node (e.g. GT_INDEX) that will be morphed.
//    If the node is a CALL or RET_EXPR, a copy will be made to a new temp.
//
GenTree* Compiler::impNormStructVal(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }

    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
        case GT_RETURN:
            break;
        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            structVal->AsCall()->gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_RET_EXPR:
            structVal->AsRetExpr()->gtRetClsHnd = structHnd;
            makeTemp                            = true;
            break;

        case GT_ARGPLACE:
            structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd;
            break;

        case GT_INDEX:
            // This will be transformed to an OBJ later.
            alreadyNormalized                       = true;
            structVal->AsIndex()->gtStructElemClass = structHnd;
            structVal->AsIndex()->gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
            break;

        case GT_FIELD:
            // Wrap it in a GT_OBJ, if needed.
            structVal->gtType = structType;
            if ((structType == TYP_STRUCT) || forceNormalization)
            {
                structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            }
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            FALLTHROUGH;

        case GT_OBJ:
        case GT_BLK:
        case GT_ASG:
            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;
            break;

        case GT_IND:
            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
        case GT_HWINTRINSIC:
            assert(structVal->gtType == structType);
            assert(varTypeIsSIMD(structVal) ||
                   HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId()));
            break;
#endif

        case GT_COMMA:
        {
            // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
            GenTree* blockNode = structVal->AsOp()->gtOp2;
            assert(blockNode->gtType == structType);

            // Is this GT_COMMA(op1, GT_COMMA())?
            GenTree* parent = structVal;
            if (blockNode->OperGet() == GT_COMMA)
            {
                // Find the last node in the comma chain.
                do
                {
                    assert(blockNode->gtType == structType);
                    parent    = blockNode;
                    blockNode = blockNode->AsOp()->gtOp2;
                } while (blockNode->OperGet() == GT_COMMA);
            }

            if (blockNode->OperGet() == GT_FIELD)
            {
                // If we have a GT_FIELD then wrap it in a GT_OBJ.
                blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
            }

#ifdef FEATURE_SIMD
            if (blockNode->OperIsSimdOrHWintrinsic())
            {
                parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
                alreadyNormalized     = true;
            }
            else
#endif
            {
                noway_assert(blockNode->OperIsBlk());

                // Sink the GT_COMMA below the blockNode addr.
                // That is GT_COMMA(op1, op2=blockNode) is transformed into
                // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
                //
                // In case of a chained GT_COMMA case, we sink the last
                // GT_COMMA below the blockNode addr.
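                //
                // For example:
                //    COMMA(op1, OBJ(addr))
                // is transformed into
                //    OBJ(COMMA(op1, addr))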
                GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1;
                assert(blockNodeAddr->gtType == TYP_BYREF);
                GenTree* commaNode       = parent;
                commaNode->gtType        = TYP_BYREF;
                commaNode->AsOp()->gtOp2 = blockNodeAddr;
                blockNode->AsOp()->gtOp1 = commaNode;
                if (parent == structVal)
                {
                    structVal = blockNode;
                }
                alreadyNormalized = true;
            }
        }
        break;

        default:
            noway_assert(!"Unexpected node in impNormStructVal()");
            break;
    }
    structVal->gtType = structType;

    if (!alreadyNormalized || forceNormalization)
    {
        if (makeTemp)
        {
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

            impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

            // The structVal is now the temp itself

            structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
            structVal = structLcl;
        }
        if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk())
        {
            // Wrap it in a GT_OBJ
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
        }
    }

    if (structLcl != nullptr)
    {
        // An OBJ on an ADDR(LCL_VAR) can never raise an exception
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum()))
        {
            structVal->gtFlags &= ~GTF_GLOB_REF;
        }
    }
    else if (structVal->OperIsBlk())
    {
        // In general an OBJ is an indirection and could raise an exception.
        structVal->gtFlags |= GTF_EXCEPT;
    }
    return structVal;
}

/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    bool*                   pRuntimeLookup /* = NULL */,
                                    bool                    mustRestoreHandle /* = false */,
                                    bool                    importParent /* = false */)
{
    assert(!fgGlobalMorph);

    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

    if (pRuntimeLookup)
    {
        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
    }

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        switch (embedInfo.handleType)
        {
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_FIELD:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
                break;

            default:
                break;
        }
    }

    // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
    GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                                      embedInfo.compileTimeHandle);

    // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
    if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
    }

    return result;
}

GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                   CORINFO_LOOKUP*         pLookup,
                                   GenTreeFlags            handleFlags,
                                   void*                   compileTimeHandle)
{
    if (!pLookup->lookupKind.needsRuntimeLookup)
    {
        // No runtime lookup is required.
        // Access is direct or memory-indirect (of a fixed address) reference

        CORINFO_GENERIC_HANDLE handle       = nullptr;
        void*                  pIndirection = nullptr;
        assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);

        if (pLookup->constLookup.accessType == IAT_VALUE)
        {
            handle = pLookup->constLookup.handle;
        }
        else if (pLookup->constLookup.accessType == IAT_PVALUE)
        {
            pIndirection = pLookup->constLookup.addr;
        }
        GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);

#ifdef DEBUG
        size_t handleToTrack;
        if (handleFlags == GTF_ICON_TOKEN_HDL)
        {
            handleToTrack = 0;
        }
        else
        {
            handleToTrack = (size_t)compileTimeHandle;
        }

        if (handle != nullptr)
        {
            addr->AsIntCon()->gtTargetHandle = handleToTrack;
        }
        else
        {
            addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack;
        }
#endif
        return addr;
    }

    if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
    {
        // Runtime does not support inlining of all shapes of runtime lookups
        // Inlining has to be aborted in such a case
        assert(compIsForInlining());
        compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
        return nullptr;
    }

    // Need to use dictionary-based access which depends on the typeContext
    // which is only available at runtime, not at compile-time.
    return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
}

#ifdef FEATURE_READYTORUN
GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
                                             GenTreeFlags          handleFlags,
                                             void*                 compileTimeHandle)
{
    CORINFO_GENERIC_HANDLE handle       = nullptr;
    void*                  pIndirection = nullptr;
    assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);

    if (pLookup->accessType == IAT_VALUE)
    {
        handle = pLookup->handle;
    }
    else if (pLookup->accessType == IAT_PVALUE)
    {
        pIndirection = pLookup->addr;
    }
    GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
#ifdef DEBUG
    assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL));
    if (handle != nullptr)
    {
        addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle;
    }
    else
    {
        addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle;
    }
#endif // DEBUG
    return addr;
}

//------------------------------------------------------------------------
// impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible
//    to be profiled and then optimized with PGO data
//
// Arguments:
//    tree - the tree object to check
//
// Returns:
//    true if the tree is a cast helper eligible to be profiled
//
bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree)
{
    if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1))
    {
        return false;
    }

    if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER)
    {
        const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd);
        if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) ||
            (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE))
        {
            return true;
        }
    }
    return false;
}
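// Note: impIsCastHelperMayHaveProfileData below matches the same four cast
// helpers as impIsCastHelperEligibleForClassProbe above; the two predicates
// differ only in the jit flag they check (JIT_FLAG_BBOPT vs JIT_FLAG_BBINSTR).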
//------------------------------------------------------------------------
// impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might
//    have profile data
//
// Arguments:
//    tree - the tree object to check
//
// Returns:
//    true if the tree is a cast helper with potential profile data
//
bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree)
{
    if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1))
    {
        return false;
    }

    if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER)
    {
        const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd);
        if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) ||
            (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE))
        {
            return true;
        }
    }
    return false;
}

GenTreeCall* Compiler::impReadyToRunHelperToTree(
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CorInfoHelpFunc         helper,
    var_types               type,
    GenTreeCall::Use*       args /* = nullptr */,
    CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
{
    CORINFO_CONST_LOOKUP lookup;
    if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
    {
        return nullptr;
    }

    GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);

    op1->setEntryPoint(lookup);

    return op1;
}
#endif

GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* op1 = nullptr;

    switch (pCallInfo->kind)
    {
        case CORINFO_CALL:
            op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);

#ifdef FEATURE_READYTORUN
            if (opts.IsReadyToRun())
            {
                op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
            }
#endif
            break;

        case CORINFO_CALL_CODE_POINTER:
            op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
            break;

        default:
            noway_assert(!"unknown call kind");
            break;
    }

    return op1;
}

//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
//    kind - lookup kind.
//
// Return Value:
//    Return GenTree pointer to generic shared context.
//
// Notes:
//    Reports that the generic context is in use.

GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
    GenTree* ctxTree = nullptr;

    // Collectible types require that for shared generic code, if we use the generic context parameter
    // that we report it. (This is a conservative approach, we could detect some cases particularly when the
    // context parameter is this that we don't need the eager reporting logic.)
    lvaGenericsContextInUse = true;

    Compiler* pRoot = impInlineRoot();

    if (kind == CORINFO_LOOKUP_THISOBJ)
    {
        // this Object
        ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF);
        ctxTree->gtFlags |= GTF_VAR_CONTEXT;

        // context is the method table pointer of the this object
        ctxTree = gtNewMethodTableLookup(ctxTree);
    }
    else
    {
        assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);

        // Exact method descriptor as passed in
        ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL);
        ctxTree->gtFlags |= GTF_VAR_CONTEXT;
    }
    return ctxTree;
}

/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
   generic instantiations.
   The lookup depends on the typeContext which is only available at
   runtime, and not at compile-time.
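   As a sketch (in pseudo-code), the null-tested form of case 2b below is:

       handle = *slotPtr;
       result = (handle != null) ? handle : helper(ctx, signature);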
   pLookup->token1 and pLookup->token2 specify the handle that is needed.
   The cases are:

   1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
      instantiation-specific handle, and the tokens to lookup the handle.
   2. pLookup->indirections != CORINFO_USEHELPER :
      2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
          to get the handle.
      2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
          If it is non-NULL, it is the handle required. Else, call a helper
          to lookup the handle.
 */

GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                          CORINFO_LOOKUP*         pLookup,
                                          void*                   compileTimeHandle)
{
    GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);

    CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
    // It's available only via the run-time helper function
    if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
    {
#ifdef FEATURE_READYTORUN
        if (opts.IsReadyToRun())
        {
            return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                             gtNewCallArgs(ctxTree), &pLookup->lookupKind);
        }
#endif
        return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle);
    }

    // Slot pointer
    GenTree* slotPtrTree = ctxTree;

    if (pRuntimeLookup->testForNull)
    {
        slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                   nullptr DEBUGARG("impRuntimeLookup slot"));
    }

    GenTree* indOffTree    = nullptr;
    GenTree* lastIndOfTree = nullptr;

    // Apply repeated indirections
    for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
    {
        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                      nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
        }

        // The last indirection could be subject to a size check (dynamic dictionary expansion)
        bool isLastIndirectionWithSizeCheck =
            ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK));

        if (i != 0)
        {
            slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
            slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
            if (!isLastIndirectionWithSizeCheck)
            {
                slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
            }
        }

        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
        }

        if (pRuntimeLookup->offsets[i] != 0)
        {
            if (isLastIndirectionWithSizeCheck)
            {
                lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                             nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
            }

            slotPtrTree =
                gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
        }
    }

    // No null test required
    if (!pRuntimeLookup->testForNull)
    {
        if (pRuntimeLookup->indirections == 0)
        {
            return slotPtrTree;
        }

        slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
        slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;

        if (!pRuntimeLookup->testForFixup)
        {
            return slotPtrTree;
        }

        impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));

        unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
        impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI);

        GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        // downcast the pointer to a TYP_INT on 64-bit targets
        slot = impImplicitIorI4Cast(slot, TYP_INT);
        // Use a GT_AND to check for the lowest bit and indirect if it is set
        GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
        GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));

        // slot = GT_IND(slot - 1)
        slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
        GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
        indir->gtFlags |= GTF_IND_NONFAULTING;
        indir->gtFlags |= GTF_IND_INVARIANT;

        slot                = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
        GenTree*      asg   = gtNewAssignNode(slot, indir);
        GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
        GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
        impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
    }

    assert(pRuntimeLookup->indirections != 0);

    impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));

    // Extract the handle
    GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
    handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING;

    // Call the helper
    // - Setup argNode with the pointer to the signature returned by the lookup
    GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle);
    GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode);
    GenTreeCall*      helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);

    // Check for null and possibly call helper
    GenTree* nullCheck       = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL));
    GenTree* handleForResult = gtCloneExpr(handleForNullCheck);

    GenTree* result = nullptr;

    if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)
    {
        // Dynamic dictionary expansion support

        assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0));

        // sizeValue = dictionary[pRuntimeLookup->sizeOffset]
        GenTreeIntCon* sizeOffset      = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL);
        GenTree*       sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset);
        GenTree*       sizeValue       = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset);
        sizeValue->gtFlags |= GTF_IND_NONFAULTING;

        // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i]
        GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL);
        GenTree* sizeCheck   = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue);

        // invert the null-check condition.
        nullCheck->ChangeOperUnchecked(GT_EQ);

        // (sizeCheck fails || nullCheck fails) ? helperCall : handle.
        // Add checks and the handle as call arguments, indirect call transformer will handle this.
        helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs);
        helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs);
        helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs);

        result = helperCall;
        addExpRuntimeLookupCandidate(helperCall);
    }
    else
    {
        GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall);
        result                       = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck);
    }

    unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree"));

    impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE);
    return gtNewLclvNode(tmp, TYP_I_IMPL);
}

/******************************************************************************
 *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
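 *  Returns true on success; false only if a specific temp number was
 *  requested but it is not a valid one.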
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
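 *  (Implementation note: it spills everything with side effects and then
 *  simply resets verCurrentState.esStackDepth to zero.)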
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
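 *  (Typically called just before an assignment to the local is appended, so
 *  that stack trees reading the old value are evaluated first.)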
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
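        // The catch argument is always an object reference, hence TYP_REF.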
        unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
        lvaTable[tempNum].lvType = TYP_REF;
        GenTree* argAsg          = gtNewTempAssign(tempNum, arg);
        arg                      = gtNewLclvNode(tempNum, TYP_REF);

        hndBlk->bbStkTempsIn = tempNum;

        Statement* argStmt;

        if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
        {
            // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus
            // won't do it for us.
            // TODO-DEBUGINFO: Previous code always set stack as non-empty
            // here. Can we not just use impCurStmtOffsSet? Are we out of sync
            // here with the stack?
            impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false));
            argStmt      = gtNewStmt(argAsg, impCurStmtDI);
        }
        else
        {
            argStmt = gtNewStmt(argAsg);
        }

        fgInsertStmtAtEnd(newBlk, argStmt);
    }

    impPushOnStack(arg, typeInfo(TI_REF, clsHnd));

    return hndBlk;
}

/*****************************************************************************
 *
 *  Given a tree, clone it. *pClone is set to the cloned tree.
 *  Returns the original tree if the cloning was easy,
 *   else returns the temp to which the tree had to be spilled.
 *  If the tree has side-effects, it will be spilled to a temp.
 */

GenTree* Compiler::impCloneExpr(GenTree*             tree,
                                GenTree**            pClone,
                                CORINFO_CLASS_HANDLE structHnd,
                                unsigned             curLevel,
                                Statement** pAfterStmt DEBUGARG(const char* reason))
{
    if (!(tree->gtFlags & GTF_GLOB_EFFECT))
    {
        GenTree* clone = gtClone(tree, true);

        if (clone)
        {
            *pClone = clone;
            return tree;
        }
    }

    /* Store the operand in a temp and return the temp */

    unsigned temp = lvaGrabTemp(true DEBUGARG(reason));

    // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
    // return a struct type. It also may modify the struct type to a more
    // specialized type (e.g. a SIMD type). So we will get the type from
    // the lclVar AFTER calling impAssignTempGen().

    impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI);
    var_types type = genActualType(lvaTable[temp].TypeGet());

    *pClone = gtNewLclvNode(temp, type);
    return gtNewLclvNode(temp, type);
}

//------------------------------------------------------------------------
// impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the
// specified IL offset and 'is call' bit, using the current stack to determine
// whether to set the 'stack empty' bit.
//
// Arguments:
//    offs   - the IL offset for the DebugInfo
//    isCall - whether the created DebugInfo should have the IsCall bit set
//
// Return Value:
//    The DebugInfo instance.
//
DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall)
{
    assert(offs != BAD_IL_OFFSET);

    bool isStackEmpty = verCurrentState.esStackDepth <= 0;
    return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall));
}

//------------------------------------------------------------------------
// impCurStmtOffsSet: Set the "current debug info" to attach to statements that
// we are generating next.
//
// Arguments:
//    offs - the IL offset
//
// Remarks:
//    This function will be called in the main IL processing loop when it is
//    determined that we have reached a location in the IL stream for which we
//    want to report debug information. This is the main way we determine which
//    statements to report debug info for to the EE: for other statements, they
//    will have no debug information attached.
//
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
    if (offs == BAD_IL_OFFSET)
    {
        impCurStmtDI = DebugInfo(compInlineContext, ILLocation());
    }
    else
    {
        impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false);
    }
}

//------------------------------------------------------------------------
// impCanSpillNow: check whether it is possible to spill all values from eeStack to local variables.
//
// Arguments:
//    prevOpcode - last importer opcode
//
// Return Value:
//    true if it is legal, false if it could be a sequence that we do not want to divide.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
{
    // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
    // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed.
    return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
}

/*****************************************************************************
 *
 *  Remember the instr offset for the statements
 *
 *  When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs),
 *  if the append was done because of a partial stack spill,
 *  as some of the trees corresponding to code up to impCurOpcOffs might
 *  still be sitting on the stack.
 *  So we delay calling of SetLastILOffset() until impNoteLastILoffs().
 *  This should be called when an opcode finally/explicitly causes
 *  impAppendTree(tree) to be called (as opposed to being called because of
 *  a spill caused by the opcode)
 */

#ifdef DEBUG

void Compiler::impNoteLastILoffs()
{
    if (impLastILoffsStmt == nullptr)
    {
        // We should have added a statement for the current basic block
        // Is this assert correct ?

        assert(impLastStmt);

        impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
    }
    else
    {
        impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
        impLastILoffsStmt = nullptr;
    }
}

#endif // DEBUG

/*****************************************************************************
 * We don't create any GenTree (excluding spills) for a branch.
 * For debugging info, we need a placeholder so that we can note
 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
 */

void Compiler::impNoteBranchOffs()
{
    if (opts.compDbgCode)
    {
        impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
    }
}

/*****************************************************************************
 * Locate the next stmt boundary for which we need to record info.
 * We will have to spill the stack at such boundaries if it is not
 * already empty.
 * Returns the next stmt boundary (after the start of the block)
 */

unsigned Compiler::impInitBlockLineInfo()
{
    /* Assume the block does not correspond with any IL offset. This prevents
       us from reporting extra offsets. Extra mappings can cause confusing
       stepping, especially if the extra mapping is a jump-target, and the
       debugger does not ignore extra mappings, but instead rewinds to the
       nearest known offset */

    impCurStmtOffsSet(BAD_IL_OFFSET);

    IL_OFFSET blockOffs = compCurBB->bbCodeOffs;

    if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
    {
        impCurStmtOffsSet(blockOffs);
    }

    /* Always report IL offset 0 or some tests get confused.
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
We know if it can be
 *  changed to TYP_I_IMPL only at the point where we use it
 */

/* static */
void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
{
    if (tree1->IsLocalAddrExpr() != nullptr)
    {
        tree1->gtType = TYP_I_IMPL;
    }

    if (tree2 && (tree2->IsLocalAddrExpr() != nullptr))
    {
        tree2->gtType = TYP_I_IMPL;
    }
}

/*****************************************************************************
 *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
 *  to make that an explicit cast in our trees, so any implicit casts that
 *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
 *  turned into explicit casts here.
 *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
 */

GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
{
    var_types currType   = genActualType(tree->gtType);
    var_types wantedType = genActualType(dstTyp);

    if (wantedType != currType)
    {
        // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
        if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
        {
            if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0)))
            {
                tree->gtType = TYP_I_IMPL;
            }
        }
#ifdef TARGET_64BIT
        else if (varTypeIsI(wantedType) && (currType == TYP_INT))
        {
            // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
            tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
        }
        else if ((wantedType == TYP_INT) && varTypeIsI(currType))
        {
            // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
            tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
        }
#endif // TARGET_64BIT
    }

    return tree;
}

/*****************************************************************************
 *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
 *  but we want to make that an explicit cast in our trees, so any implicit casts
 *  that exist in the IL are turned into explicit casts here.
 */

GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
{
    if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
    {
        tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
    }

    return tree;
}

//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
//    with a GT_COPYBLK node.
//
// Arguments:
//    sig - The InitializeArray signature.
//
// Return Value:
//    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
//    nullptr otherwise.
//
// Notes:
//    The function recognizes the following IL pattern:
//      ldc <length> or a list of ldc <lower bound>/<length>
//      newarr or newobj
//      dup
//      ldtoken <field handle>
//      call InitializeArray
//    The lower bounds need not be constant except when the array rank is 1.
//    The function recognizes all kinds of arrays thus enabling a small runtime
//    such as CoreRT to skip providing an implementation for InitializeArray.

GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
    assert(sig->numArgs == 2);

    GenTree* fieldTokenNode = impStackTop(0).val;
    GenTree* arrayLocalNode = impStackTop(1).val;

    //
    // Verify that the field token is known and valid.  Note that it's also
    // possible for the token to come from reflection, in which case we cannot do
    // the optimization and must therefore revert to calling the helper.  You can
    // see an example of this in bvt\DynIL\initarray2.exe (in Main).
    //

    // Check to see if the ldtoken helper call is what we see here.
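    // A compile-time known field token shows up as a call to the
    // CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD helper whose argument is a
    // constant field handle; anything else causes us to bail out below.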
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } // // We need to get the number of elements in the array and the size of each element. // We verify that the newarr statement is exactly what we expect it to be. // If it's not, then we just return nullptr and don't optimize this call. // // It is possible that we don't have any statements in the block yet. if (impLastStmt == nullptr) { return nullptr; } // // We start by looking at the last statement, making sure it's an assignment, and // that the target of the assignment is the array passed to InitializeArray. // GenTree* arrayAssignment = impLastStmt->GetRootNode(); if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != arrayLocalNode->AsLclVarCommon()->GetLclNum())) { return nullptr; } // // Make sure that the object being assigned is a helper call. // GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; } // // Verify that it is one of the new array helpers. // bool isMDArray = false; if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) #ifdef FEATURE_READYTORUN && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) #endif ) { if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR)) { return nullptr; } isMDArray = true; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; // // Make sure we found a compile time handle to the array // if (!arrayClsHnd) { return nullptr; } unsigned rank = 0; S_UINT32 numElements; if (isMDArray) { rank = info.compCompHnd->getArrayRank(arrayClsHnd); if (rank == 0) { return nullptr; } GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs; assert(tokenArg != nullptr); GenTreeCall::Use* numArgsArg = tokenArg->GetNext(); assert(numArgsArg != nullptr); GenTreeCall::Use* argsArg = numArgsArg->GetNext(); assert(argsArg != nullptr); // // The number of arguments should be a constant between 1 and 64. The rank can't be 0 // so at least one length must be present and the rank can't exceed 32 so there can // be at most 64 arguments - 32 lengths and 32 lower bounds.
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
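// For the single-dimensional case handled below, the matched helper call is // expected to look like (sketch): CALL help CORINFO_HELP_NEWARR_1_VC(arrayCls, numElements) // i.e. the length sits in the second argument slot, except for the ReadyToRun // helper, which takes the length as its first argument.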
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
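// Illustrative source-level pattern (a sketch; the exact IL the managed compiler // emits may vary by version): a property such as // static ReadOnlySpan<int> S => new int[] { 1, 2, 3 }; // can be lowered by Roslyn to an ldtoken on a static data blob field followed by // a call to RuntimeHelpers.CreateSpan<int>, which we try to recognize here and // expand into a constant (pointer, length) pair with no allocation.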
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken); CORINFO_CLASS_HANDLE fieldClsHnd; var_types fieldElementType = JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd)); unsigned totalFieldSize; // Most static initialization data fields are of some structure, but it is possible for them to be of various // primitive types as well if (fieldElementType == var_types::TYP_STRUCT) { totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd); } else { totalFieldSize = genTypeSize(fieldElementType); } // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom() CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF) { return nullptr; } const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd); assert(targetElemSize != 0); const unsigned count = totalFieldSize / targetElemSize; if (count == 0) { return nullptr; } void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize); if (!data) { return nullptr; } // // Ready to commit to the work // impPopStack(); // Turn count and pointer value into constants. GenTree* lengthValue = gtNewIconNode(count, TYP_INT); GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR); // Construct ReadOnlySpan<T> to return. CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass; unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>")); lvaSetStruct(spanTempNum, spanHnd, false); CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0); CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements that initialize the span impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span.
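// Sketch of what was just appended, in evaluation order (exposition only): // ASG(LCL_FLD int spanTemp + TARGET_POINTER_SIZE, CNS_INT count) -- length field // ASG(LCL_FLD byref spanTemp + 0, CNS_INT(handle) data) -- pointer field // and the value of the whole expansion is the span-typed local itself.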
return impCreateLocalNode(spanTempNum DEBUGARG(0)); } //------------------------------------------------------------------------ // impIntrinsic: possibly expand intrinsic call into alternate IR sequence // // Arguments: // newobjThis - for constructor calls, the tree for the newly allocated object // clsHnd - handle for the intrinsic method's class // method - handle for the intrinsic method // sig - signature of the intrinsic method // methodFlags - CORINFO_FLG_XXX flags of the intrinsic method // memberRef - the token for the intrinsic method // readonlyCall - true if call has a readonly prefix // tailCall - true if call is in tail position // pConstrainedResolvedToken -- resolved token for constrained call, or nullptr // if call is not constrained // constraintCallThisTransform -- this transform to apply for a constrained call // pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h) // for "traditional" jit intrinsics // isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call // that is amenable to special downstream optimization opportunities // // Returns: // IR tree to use in place of the call, or nullptr if the jit should treat // the intrinsic call like a normal call. // // pIntrinsicName set to non-illegal value if the call is recognized as a // traditional jit intrinsic, even if the intrinsic is not expanded. // // isSpecial set true if the expansion is subject to special // optimizations later in the jit processing // // Notes: // On success the IR tree may be a call to a different method or an inline // sequence. If it is a call, then the intrinsic processing here is responsible // for handling all the special cases, as upon return to impImportCall // expanded intrinsics bypass most of the normal call processing. // // Intrinsics are generally not recognized in minopts and debug codegen. // // However, certain traditional intrinsics are identified as "must expand" // if there is no fallback implementation to invoke; these must be handled // in all codegen modes. // // New style intrinsics (where the fallback implementation is in IL) are // identified as "must expand" if they are invoked from within their // own method bodies. // GenTree* Compiler::impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic) { assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0); bool mustExpand = false; bool isSpecial = false; NamedIntrinsic ni = NI_Illegal; if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0) { // The recursive non-virtual calls to Jit intrinsics are must-expand by convention. mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL)); ni = lookupNamedIntrinsic(method); // We specially support the following on all platforms to allow for dead // code optimization and to more generally support recursive intrinsics.
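// For example (exposition only): when targeting arm64, a guard such as // if (Sse41.IsSupported) { ... } // maps to NI_IsSupported_False below, folds to a constant false, and the // guarded block becomes dead code that later phases can remove.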
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
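// e.g. RuntimeHelpers.IsKnownConstant(42) or IsKnownConstant("literal") can be answered right here at import time.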
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
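// (If either tree had side effects, say the index came from a call, evaluating // it once for the bounds check and again for the address computation would // duplicate those effects; impCloneExpr spills such trees to temps and hands // back uses of the temp instead.)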
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
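// e.g. typeof(int) == typeof(string) can fold all the way to a constant false // here, without ever materializing runtime type objects.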
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
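// (On arm64, for instance, a load-only barrier can use a weaker encoding such // as 'dmb ishld' rather than a full 'dmb ish'; on xarch no fence instruction // is emitted at all, as noted above.)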
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
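// Reminder on semantics (and why a plain mul+add is not a valid fallback): // FusedMultiplyAdd(x, y, z) computes x * y + z with a single rounding step, // so its result can differ in the last bit from (x * y) + z rounded twice.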
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: #ifdef TARGET_ARM64 // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant // so we can then emit maxss/minss and avoid NaN/-0.0 handling case NI_System_Math_Max: case NI_System_Math_Min: #endif case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like we // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the simple box cases (in particular, rule out nullables).
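// For example (sketch): given 'int x; ... x.GetType()', the constrained // callvirt would normally box x merely to ask for its type; because the exact // type is statically known here, we can construct typeof(int) directly and // skip the box entirely.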
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall to do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. // Also set the EXCEPTION flag because the native implementation of the // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimensional array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone.
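// As a concrete example of the values involved (illustration only): an array // created via Array.CreateInstance(typeof(int), new[] { 3 }, new[] { 10 }) // reports GetLowerBound(0) == 10, GetLength(0) == 3 and GetUpperBound(0) == 12, // matching the lowerBound + length - 1 computation in the expansion below.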
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
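// e.g. ReverseEndianness((uint)0x11223344) == 0x44332211; for the 16-bit // overloads GT_BSWAP16 swaps only the low two bytes, which is why the result // is cast back to the call type above.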
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
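// For instance (illustrative, target-dependent): Math.Sqrt typically maps // onto a hardware square-root instruction and survives as GT_INTRINSIC, while // something like Math.Pow is implemented by a user call and is re-materialized // as an ordinary call during rationalization.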
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
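// For example, a call to System.Math.Sqrt reaches this function with // namespaceName == "System", className == "Math" and methodName == "Sqrt", // and the string matching below maps it to NI_System_Math_Sqrt.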
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
{ result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. 
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
        return nullptr;
    }

    GenTree* val = nullptr;

    if (intrinsicName == NI_Array_Set)
    {
        // Assignment of a struct is more work, and there are more gets than sets.
        if (elemType == TYP_STRUCT)
        {
            return nullptr;
        }

        val = impPopStack().val;
        assert(genActualType(elemType) == genActualType(val->gtType) ||
               (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
               (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
               (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
    }

    noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);

    GenTree* inds[GT_ARR_MAX_RANK];
    for (unsigned k = rank; k > 0; k--)
    {
        inds[k - 1] = impPopStack().val;
    }

    GenTree* arr = impPopStack().val;
    assert(arr->gtType == TYP_REF);

    GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
                                                              static_cast<unsigned char>(arrayElemSize), elemType,
                                                              &inds[0]);

    if (intrinsicName != NI_Array_Address)
    {
        if (varTypeIsStruct(elemType))
        {
            arrElem = gtNewObjNode(sig->retTypeClass, arrElem);
        }
        else
        {
            arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
        }
    }

    if (intrinsicName == NI_Array_Set)
    {
        assert(val != nullptr);
        return gtNewAssignNode(arrElem, val);
    }
    else
    {
        return arrElem;
    }
}

//------------------------------------------------------------------------
// impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call
//
// Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization,
// if the object to keep alive is a GT_BOX, removes its side effects and
// uses the address of a local (copied from the box's source if needed)
// as the operand for GT_KEEPALIVE. For the BOX optimization, if the class
// of the box has no GC fields, a GT_NOP is returned.
//
// Arguments:
//    objToKeepAlive - the intrinsic call's argument
//
// Return Value:
//    The imported GT_KEEPALIVE or GT_NOP - see description.
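//
// Notes:
//    As an illustrative (hypothetical) sketch: for C# like
//        GC.KeepAlive((object)someInt);
//    where the boxed type has no GC fields, the import reduces to a NOP,
//    while for a box whose payload does contain GC references, the address
//    of the copied local is kept alive instead of the box allocation itself.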
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
                        //
                        ThisInitState origTIS           = verCurrentState.thisInitialized;
                        verCurrentState.thisInitialized = TIS_Top;
                        impVerifyEHBlock(block, true);
                        verCurrentState.thisInitialized = origTIS;
                    }
                }
            }
        }
    }
    else
    {
        assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
    }

    return true;
}

/*****************************************************************************
 * 'logMsg' is true if a log message needs to be logged. false if the caller has
 *   already logged it (presumably in a more detailed fashion than done here)
 */

void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
    block->bbJumpKind = BBJ_THROW;
    block->bbFlags |= BBF_FAILED_VERIFICATION;
    block->bbFlags &= ~BBF_IMPORTED;

    impCurStmtOffsSet(block->bbCodeOffs);

    // Clear the statement list as it exists so far; we're only going to have a verification exception.
    impStmtList = impLastStmt = nullptr;

#ifdef DEBUG
    if (logMsg)
    {
        JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
                block->bbCodeOffs, block->bbCodeOffsEnd));
        if (verbose)
        {
            printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
        }
    }

    if (JitConfig.DebugBreakOnVerificationFailure())
    {
        DebugBreak();
    }
#endif

    impBeginTreeList();

    // if the stack is non-empty evaluate all the side-effects
    if (verCurrentState.esStackDepth > 0)
    {
        impEvalSideEffects();
    }
    assert(verCurrentState.esStackDepth == 0);

    GenTree* op1 =
        gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs)));
    // verCurrentState.esStackDepth = 0;
    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

    // The inliner is not able to handle methods that require a throw block, so
    // make sure this method never gets inlined.
    info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
}

/*****************************************************************************
 *
 */
void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
{
    verResetCurrentState(block, &verCurrentState);
    verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));

#ifdef DEBUG
    impNoteLastILoffs(); // Remember at which BC offset the tree was finished
#endif                   // DEBUG
}

/******************************************************************************/
typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
{
    assert(ciType < CORINFO_TYPE_COUNT);

    typeInfo tiResult;
    switch (ciType)
    {
        case CORINFO_TYPE_STRING:
        case CORINFO_TYPE_CLASS:
            tiResult = verMakeTypeInfo(clsHnd);
            if (!tiResult.IsType(TI_REF))
            { // type must be consistent with element type
                return typeInfo();
            }
            break;

#ifdef TARGET_64BIT
        case CORINFO_TYPE_NATIVEINT:
        case CORINFO_TYPE_NATIVEUINT:
            if (clsHnd)
            {
                // If we have more precise information, use it
                return verMakeTypeInfo(clsHnd);
            }
            else
            {
                return typeInfo::nativeInt();
            }
            break;
#endif // TARGET_64BIT

        case CORINFO_TYPE_VALUECLASS:
        case CORINFO_TYPE_REFANY:
            tiResult = verMakeTypeInfo(clsHnd);
            // type must be consistent with element type
            if (!tiResult.IsValueClass())
            {
                return typeInfo();
            }
            break;

        case CORINFO_TYPE_VAR:
            return verMakeTypeInfo(clsHnd);

        case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
        case CORINFO_TYPE_VOID:
            return typeInfo();
            break;

        case CORINFO_TYPE_BYREF:
        {
            CORINFO_CLASS_HANDLE childClassHandle;
            CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
            return ByRef(verMakeTypeInfo(childType, childClassHandle));
        }
        break;

        default:
            if (clsHnd)
            { // If we have more precise information, use it
                return typeInfo(TI_STRUCT, clsHnd);
            }
            else
            {
                return typeInfo(JITtype2tiType(ciType));
            }
    }
    return tiResult;
}

/******************************************************************************/

typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
{
    if (clsHnd == nullptr)
    {
        return typeInfo();
    }

    // Byrefs should only occur in method and local signatures, which are accessed
    // using ICorClassInfo and ICorClassInfo.getChildType.
    // So findClass() and getClassAttribs() should not be called for byrefs

    if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
    {
        assert(!"Did findClass() return a Byref?");
        return typeInfo();
    }

    unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);

    if (attribs & CORINFO_FLG_VALUECLASS)
    {
        CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);

        // Meta-data validation should ensure that CORINFO_TYPE_BYREF should
        // not occur here, so we may want to change this to an assert instead.
        if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
        {
            return typeInfo();
        }

#ifdef TARGET_64BIT
        if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
        {
            return typeInfo::nativeInt();
        }
#endif // TARGET_64BIT

        if (t != CORINFO_TYPE_UNDEF)
        {
            return (typeInfo(JITtype2tiType(t)));
        }
        else if (bashStructToRef)
        {
            return (typeInfo(TI_REF, clsHnd));
        }
        else
        {
            return (typeInfo(TI_STRUCT, clsHnd));
        }
    }
    else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
    {
        // See comment in _typeInfo.h for why we do it this way.
        return (typeInfo(TI_REF, clsHnd, true));
    }
    else
    {
        return (typeInfo(TI_REF, clsHnd));
    }
}

/******************************************************************************/
bool Compiler::verIsSDArray(const typeInfo& ti)
{
    if (ti.IsNullObjRef())
    { // nulls are SD arrays
        return true;
    }

    if (!ti.IsType(TI_REF))
    {
        return false;
    }

    if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
    {
        return false;
    }
    return true;
}

/******************************************************************************/
/* Given 'arrayObjectType' which is an array type, fetch the element type. */
/* Returns an error type if anything goes wrong */

typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType)
{
    assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case

    if (!verIsSDArray(arrayObjectType))
    {
        return typeInfo();
    }

    CORINFO_CLASS_HANDLE childClassHandle = nullptr;
    CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);

    return verMakeTypeInfo(ciType, childClassHandle);
}

/*****************************************************************************
 */
typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
{
    CORINFO_CLASS_HANDLE classHandle;
    CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));

    var_types type = JITtype2varType(ciType);
    if (varTypeIsGC(type))
    {
        // For efficiency, getArgType only returns something in classHandle for
        // value types. For other types that have additional type info, you
        // have to call back explicitly
        classHandle = info.compCompHnd->getArgClass(sig, args);
        if (!classHandle)
        {
            NO_WAY("Could not figure out Class specified in argument or local signature");
        }
    }

    return verMakeTypeInfo(ciType, classHandle);
}

bool Compiler::verIsByRefLike(const typeInfo& ti)
{
    if (ti.IsByRef())
    {
        return true;
    }
    if (!ti.IsType(TI_STRUCT))
    {
        return false;
    }
    return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE;
}

bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
{
    if (ti.IsPermanentHomeByRef())
    {
        return true;
    }
    else
    {
        return false;
    }
}

bool Compiler::verIsBoxable(const typeInfo& ti)
{
    return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
            || ti.IsUnboxedGenericTypeVar() ||
            (ti.IsType(TI_STRUCT) &&
             // exclude byreflike structs
             !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE)));
}

// Is it a boxed value type?
bool Compiler::verIsBoxedValueType(const typeInfo& ti)
{
    if (ti.GetType() == TI_REF)
    {
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
        return !!eeIsValueClass(clsHnd);
    }
    else
    {
        return false;
    }
}

/*****************************************************************************
 *
 *  Check if a TailCall is legal.
 */

bool Compiler::verCheckTailCallConstraint(
    OPCODE                  opcode,
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
    bool                    speculative                // If true, won't throw if verification fails. Instead it will
                                                       // return false to the caller.
                                                       // If false, it will throw.
) { DWORD mflags; CORINFO_SIG_INFO sig; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped CORINFO_METHOD_HANDLE methodHnd = nullptr; CORINFO_CLASS_HANDLE methodClassHnd = nullptr; unsigned methodClassFlgs = 0; assert(impOpcodeIsCallOpcode(opcode)); if (compIsForInlining()) { return false; } // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { /* Get the call sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; } else { methodHnd = pResolvedToken->hMethod; mflags = info.compCompHnd->getMethodAttribs(methodHnd); // When verifying generic code we pair the method handle with its // owning class to get the exact method signature. methodClassHnd = pResolvedToken->hClass; assert(methodClassHnd); eeGetMethodSig(methodHnd, &sig, methodClassHnd); // opcode specific check methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd); } // We must have got the methodClassHnd if opcode is not CEE_CALLI assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI); if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } // check compatibility of the arguments unsigned int argCount; argCount = sig.numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig.args; while (argCount--) { typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack(); // check that the argument is not a byref for tailcalls VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative); // For unsafe code, we might have parameters containing pointer to the stack location. // Disallow the tailcall for this kind. CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle)); VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative); args = info.compCompHnd->getArgNext(args); } // update popCount popCount += sig.numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ if (!(mflags & CORINFO_FLG_STATIC)) { // Always update the popCount. // This is crucial for the stack calculation to be correct. typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; if (opcode == CEE_CALLI) { // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object // on the stack. 
if (tiThis.IsValueClass()) { tiThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative); } else { // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative); } } // Tail calls on constrained calls should be illegal too: // when instantiated at a value type, a constrained call may pass the address of a stack allocated value VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative); // Get the exact view of the signature for an array method if (sig.retType != CORINFO_TYPE_VOID) { if (methodClassFlgs & CORINFO_FLG_ARRAY) { assert(opcode != CEE_CALLI); eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } } typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass); typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass); // void return type gets morphed into the error type, so we have to treat them specially here if (sig.retType == CORINFO_TYPE_VOID) { VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch", speculative); } else { VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType), NormaliseForStack(tiCallerRetType), true), "tailcall return mismatch", speculative); } // for tailcall, stack must be empty VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative); return true; // Yes, tailcall is legal } /***************************************************************************** * * Checks the IL verification rules for the call */ void Compiler::verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)) { DWORD mflags; CORINFO_SIG_INFO* sig = nullptr; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { Verify(false, "Calli not verifiable"); return; } //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item. 
    mflags = callInfo->verMethodFlags;

    sig = &callInfo->verSig;

    if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
    }

    // opcode specific check
    unsigned methodClassFlgs = callInfo->classFlags;
    switch (opcode)
    {
        case CEE_CALLVIRT:
            // cannot do callvirt on valuetypes
            VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
            VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
            break;

        case CEE_NEWOBJ:
        {
            assert(!tailCall); // Importer should not allow this
            VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
                           "newobj must be on instance");

            if (methodClassFlgs & CORINFO_FLG_DELEGATE)
            {
                VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
                typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
                typeInfo tiDeclaredFtn =
                    verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
                VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");

                assert(popCount == 0);
                typeInfo tiActualObj = impStackTop(1).seTypeInfo;
                typeInfo tiActualFtn = impStackTop(0).seTypeInfo;

                VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
                VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
                VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
                               "delegate object type mismatch");

                CORINFO_CLASS_HANDLE objTypeHandle =
                    tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();

                // the method signature must be compatible with the delegate's invoke method

                // check that for virtual functions, the type of the object used to get the
                // ftn ptr is the same as the type of the object passed to the delegate ctor.
                // since this is a bit of work to determine in general, we pattern match stylized
                // code sequences

                // the delegate creation code check, which used to be done later, is now done here
                // so we can read delegateMethodRef directly from
                // the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
                // we then use it in our call to isCompatibleDelegate().
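                // As an illustrative sketch, the stylized IL sequences matched here are:
                //     ldftn      <target method>
                //     newobj     <delegate .ctor>
                // or, for instance dispatch:
                //     dup
                //     ldvirtftn  <target method>
                //     newobj     <delegate .ctor>
                // (verCheckDelegateCreation below matches exactly these shapes.)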
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
                    Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                           "Accessing protected method through wrong type.");
                }
                goto DONE_ARGS;
            }
        }
        // fall thru to default checks
        FALLTHROUGH;
        default:
            VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
    }

    VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
                   "can only newobj a delegate constructor");

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig->numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig->args;
    while (argCount--)
    {
        typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;

        typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
        VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");

        args = info.compCompHnd->getArgNext(args);
    }

DONE_ARGS:

    // update popCount
    popCount += sig->numArgs;

    // check for 'this' on non-static methods, not called via NEWOBJ
    CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
    if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
    {
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis.IsType(TI_REF))
        {
            instanceClassHnd = tiThis.GetClassHandleForObjRef();
        }

        // Check type compatibility of the this argument
        typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();
        }

        // If this is a call to the base class .ctor, set thisPtr Init for
        // this block.
        if (mflags & CORINFO_FLG_CONSTRUCTOR)
        {
            if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
                verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
            {
                assert(verCurrentState.thisInitialized !=
                       TIS_Bottom); // This should never be the case just from the logic of the verifier.
                VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
                               "Call to base class constructor when 'this' is possibly initialized");
                // Otherwise, 'this' is now initialized.
                verCurrentState.thisInitialized = TIS_Init;
                tiThis.SetInitialisedObjRef();
            }
            else
            {
                // We allow direct calls to value type constructors
                // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
                // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
                VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
                               "Bad call to a constructor");
            }
        }

        if (pConstrainedResolvedToken != nullptr)
        {
            VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");

            typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);

            // We just dereference this and test for equality
            tiThis.DereferenceByRef();
            VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
                           "this type mismatch with constrained type operand");

            // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
            tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
        }

        // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
        if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
        {
            tiDeclaredThis.SetIsReadonlyByRef();
        }

        VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");

        if (tiThis.IsByRef())
        {
            // Find the actual type where the method exists (as opposed to what is declared
            // in the metadata). This is to prevent passing a byref as the "this" argument
            // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.

            CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
            VerifyOrReturn(eeIsValueClass(actualClassHnd),
                           "Call to base type of valuetype (which is never a valuetype)");
        }

        // Rules for non-virtual call to a non-final virtual method:

        // Define:
        // The "this" pointer is considered to be "possibly written" if
        //   1. Its address has been taken (LDARGA 0) anywhere in the method.
        //   (or)
        //   2. It has been stored to (STARG.0) anywhere in the method.

        // A non-virtual call to a non-final virtual method is only allowed if
        //   1. The this pointer passed to the callee is an instance of a boxed value type.
        //   (or)
        //   2. The this pointer passed to the callee is the current method's this pointer.
        //      (and) The current method's this pointer is not "possibly written".

        // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
        // virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
        // This is stronger than is strictly needed, but implementing a laxer rule is significantly
        // harder and more error prone.
        if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0))
        {
            VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis),
                           "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
                           "a boxed value type.");
        }
    }

    // check any constraints on the callee's class and type parameters
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
                   "method has unsatisfied class constraints");
    VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
                   "method has unsatisfied method constraints");

    if (mflags & CORINFO_FLG_PROTECTED)
    {
        VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                       "Can't access protected method");
    }

    // Get the exact view of the signature for an array method
    if (sig->retType != CORINFO_TYPE_VOID)
    {
        eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
    }

    // "readonly." prefixed calls only allowed for the Address operation on arrays.
    // The methods supported by array types are under the control of the EE
    // so we can trust that only the Address operation returns a byref.
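    // For example (an illustrative sketch of the only legal shape), the
    // "readonly." prefix is expected on MD array Address calls like:
    //     readonly.
    //     call instance int32& int32[0...,0...]::Address(int32, int32)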
if (readonlyCall) { typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass); VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix"); } // Verify the tailcall if (tailCall) { verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false); } } /***************************************************************************** * Checks that a delegate creation is done using the following pattern: * dup * ldvirtftn targetMemberRef * OR * ldftn targetMemberRef * * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if * not in this basic block) * * targetMemberRef is read from the code sequence. * targetMemberRef is validated iff verificationNeeded. */ bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef) { if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]); return true; } else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]); return true; } return false; } typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType) { Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref"); typeInfo ptrVal = verVerifyLDIND(tiTo, instrType); typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack(); if (!tiCompatibleWith(value, normPtrVal, true)) { Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch"); } return ptrVal; } typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType) { assert(!instrType.IsStruct()); typeInfo ptrVal; if (ptr.IsByRef()) { ptrVal = DereferenceByRef(ptr); if (instrType.IsObjRef() && !ptrVal.IsObjRef()) { Verify(false, "bad pointer"); } else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal)) { Verify(false, "pointer not consistent with instr"); } } else { Verify(false, "pointer not byref"); } return ptrVal; } // Verify that the field is used properly. 'tiThis' is NULL for statics, // 'fieldFlags' is the fields attributes, and mutator is true if it is a // ld*flda or a st*fld. // 'enclosingClass' is given if we are accessing a field in some specific type. void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis) { CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass; unsigned fieldFlags = fieldInfo.fieldFlags; CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class. 
    bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);

    if (mutator)
    {
        Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
        if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
        {
            Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
                       info.compIsStatic == isStaticField,
                   "bad use of initonly field (set or address taken)");
        }
    }

    if (tiThis == nullptr)
    {
        Verify(isStaticField, "used static opcode with non-static field");
    }
    else
    {
        typeInfo tThis = *tiThis;

        if (allowPlainStructAsThis && tThis.IsValueClass())
        {
            tThis.MakeByRef();
        }

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis->IsType(TI_REF))
        {
            instanceClass = tiThis->GetClassHandleForObjRef();
        }

        // Note that even if the field is static, we require that the this pointer
        // satisfy the same constraints as a non-static field. This happens to
        // be simpler and seems reasonable
        typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();

            // we allow read-only tThis, on any field access (even stores!), because if the
            // class implementor wants to prohibit stores he should make the field private.
            // we do this by setting the read-only bit on the type we compare tThis to.
            tiDeclaredThis.SetIsReadonlyByRef();
        }
        else if (verTrackObjCtorInitState && tThis.IsThisPtr())
        {
            // Any field access is legal on "uninitialized" this pointers.
            // The easiest way to implement this is to simply set the
            // initialized bit for the duration of the type check on the
            // field access only. It does not change the state of the "this"
            // for the function as a whole. Note that the "tThis" is a copy
            // of the original "this" type (*tiThis) passed in.
            tThis.SetInitialisedObjRef();
        }

        Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
    }

    // Presently the JIT does not check that we don't store or take the address of init-only fields
    // since we cannot guarantee their immutability and it is not a security issue.

    // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
                   "field has unsatisfied class constraints");
    if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
    {
        Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
               "Accessing protected method through wrong type.");
    }
}

void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
{
    if (tiOp1.IsNumberType())
    {
#ifdef TARGET_64BIT
        Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
#else  // TARGET_64BIT
        // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
        // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
        // but compatible, since we can coalesce native int with int32 (see section III.1.5).
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
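//
//    For reference, the idioms recognized below look like (illustrative IL):
//        box !T; unbox.any !T            -> elided entirely
//        box !T; brtrue <label>          -> folded to a constant branch
//        box !T; isinst C; brtrue <lbl>  -> folded when the cast result is
//                                           statically known
//        box !T; isinst !T; unbox.any !T -> elided entirely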
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
                                                                       1 : 0),
                                                       typeInfo(TI_INT));

                                        // Skip the next isinst instruction
                                        return 1 + sizeof(mdToken);
                                    }
                                }
                                else if (boxHelper == CORINFO_HELP_BOX_NULLABLE)
                                {
                                    // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or
                                    // "ldc.i4.0 + brtrue/brfalse" in case the underlying type is not castable to
                                    // the target type.
                                    CORINFO_RESOLVED_TOKEN isInstResolvedToken;
                                    impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);

                                    CORINFO_CLASS_HANDLE nullableCls   = pResolvedToken->hClass;
                                    CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls);

                                    TypeCompareState castResult =
                                        info.compCompHnd->compareTypesForCast(underlyingCls,
                                                                              isInstResolvedToken.hClass);

                                    if (castResult == TypeCompareState::Must)
                                    {
                                        const CORINFO_FIELD_HANDLE hasValueFldHnd =
                                            info.compCompHnd->getFieldInClass(nullableCls, 0);

                                        assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0);
                                        assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr),
                                                       "hasValue"));

                                        GenTree* objToBox = impPopStack().val;

                                        // Spill struct to get its address (to access hasValue field)
                                        objToBox =
                                            impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true);

                                        impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0),
                                                       typeInfo(TI_INT));
                                        JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n");
                                        return 1 + sizeof(mdToken);
                                    }
                                    else if (castResult == TypeCompareState::MustNot)
                                    {
                                        impPopStack();
                                        impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT));
                                        JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n");
                                        return 1 + sizeof(mdToken);
                                    }
                                }
                            }
                        }
                        break;

                    // box + isinst + unbox.any
                    case CEE_UNBOX_ANY:
                        if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp)
                        {
                            if (makeInlineObservation)
                            {
                                compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
                                return 2 + sizeof(mdToken) * 2;
                            }

                            // See if the resolved tokens in box, isinst and unbox.any describe types that are equal.
                            CORINFO_RESOLVED_TOKEN isinstResolvedToken = {};
                            impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class);

                            if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass,
                                                                          pResolvedToken->hClass) ==
                                TypeCompareState::Must)
                            {
                                CORINFO_RESOLVED_TOKEN unboxResolvedToken = {};
                                impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);

                                // If so, box + isinst + unbox.any is a nop.
                                if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass,
                                                                              pResolvedToken->hClass) ==
                                    TypeCompareState::Must)
                                {
                                    JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n");
                                    return 2 + sizeof(mdToken) * 2;
                                }
                            }
                        }
                        break;
                }
            }
            break;

        default:
            break;
    }

    return -1;
}

//------------------------------------------------------------------------
// impImportAndPushBox: build and import a value-type box
//
// Arguments:
//   pResolvedToken - resolved token from the box operation
//
// Return Value:
//   None.
//
// Side Effects:
//   The value to be boxed is popped from the stack, and a tree for
//   the boxed value is pushed. This method may create upstream
//   statements, spill side effecting trees, and create new temps.
//
//   If importing an inlinee, we may also discover the inline must
//   fail. If so there is no new value pushed on the stack. Callers
//   should use compDonotInline after calling this method to see if
//   ongoing importation should be aborted.
//
// Notes:
//   Boxing of ref classes results in the same value as the value on
//   the top of the stack, so is handled inline in impImportBlockCode
//   for the CEE_BOX case. Only value or primitive type boxes make it
//   here.
//
//   Boxing for nullable types is done via a helper call; boxing
//   of other value types is expanded inline or handled via helper
//   call, depending on the jit's codegen mode.
//
//   When the jit is operating in size and time constrained modes,
//   using a helper call here can save jit time and code size. But it
//   also may inhibit cleanup optimizations that could have had an
//   even greater effect on code size and jit time. An optimal
//   strategy may need to peek ahead and see if it is easy to tell how
//   the box is being used. For now, we defer.

void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    // Spill any special side effects
    impSpillSpecialSideEff();

    // Get the expression to box from the stack.
    GenTree*             op1       = nullptr;
    GenTree*             op2       = nullptr;
    StackEntry           se        = impPopStack();
    CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
    GenTree*             exprToBox = se.val;

    // Look at what helper we should use.
    CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);

    // Determine what expansion to prefer.
    //
    // In size/time/debuggable constrained modes, the helper call
    // expansion for box is generally smaller and is preferred, unless
    // the value to box is a struct that comes from a call. In that
    // case the call can construct its return value directly into the
    // box payload, saving possibly some up-front zeroing.
    //
    // Currently primitive type boxes always get inline expanded. We may
    // want to do the same for small structs if they don't come from
    // calls and don't have GC pointers, since explicitly copying such
    // structs is cheap.
    JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
    bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
    bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
    bool expandInline    = canExpandInline && !optForSize;

    if (expandInline)
    {
        JITDUMP(" inline allocate/copy sequence\n");

        // we are doing 'normal' boxing.  This means that we can inline the box operation
        // Box(expr) gets morphed into
        // temp = new(clsHnd)
        // cpobj(temp+4, expr, clsHnd)
        // push temp
        // The code paths differ slightly below for structs and primitives because
        // "cpobj" differs in these cases.  In one case you get
        //    impAssignStructPtr(temp+4, expr, clsHnd)
        // and the other you get
        //    *(temp+4) = expr

        if (opts.OptimizationDisabled())
        {
            // For minopts/debug code, try and minimize the total number
            // of box temps by reusing an existing temp when possible.
            if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
            {
                impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
            }
        }
        else
        {
            // When optimizing, use a new temp for each box operation
            // since we then know the exact class of the box temp.
            impBoxTemp                       = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
            lvaTable[impBoxTemp].lvType      = TYP_REF;
            lvaTable[impBoxTemp].lvSingleDef = 1;
            JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
            const bool isExact = true;
            lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
        }

        // needs to stay in use until this box expression is appended to
        // some other node.  We approximate this by keeping it alive until
        // the opcode stack becomes empty
        impBoxTempInUse = true;

        // Remember the current last statement in case we need to move
        // a range of statements to ensure the box temp is initialized
        // before it's used.
        //
        Statement* const cursor = impLastStmt;

        const bool useParent = false;
        op1                  = gtNewAllocObjNode(pResolvedToken, useParent);
        if (op1 == nullptr)
        {
            // If we fail to create the newobj node, we must be inlining
            // and have run across a type we can't describe.
            //
            assert(compDonotInline());
            return;
        }

        // Remember that this basic block contains 'new' of an object,
        // and so does this method
        //
        compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
        optMethodFlags |= OMF_HAS_NEWOBJ;

        // Assign the boxed object to the box temp.
        //
        GenTree*   asg     = gtNewTempAssign(impBoxTemp, op1);
        Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        // If the exprToBox is a call that returns its value via a ret buf arg,
        // move the assignment statement(s) before the call (which must be a top level tree).
        //
        // We do this because impAssignStructPtr (invoked below) will
        // back-substitute into a call when it sees a GT_RET_EXPR and the call
        // has a hidden buffer pointer, so we need to reorder things to avoid
        // creating out-of-sequence IR.
        //
        if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR))
        {
            GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall();

            if (call->HasRetBufArg())
            {
                JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call));

                // Walk back through the statements in this block, looking for the one
                // that has this call as the root node.
                //
                // Because gtNewTempAssign (above) may have added statements that
                // feed into the actual assignment we need to move this set of added
                // statements as a group.
                //
                // Note boxed allocations are side-effect free (no com or finalizer) so
                // our only worries here are (correctness) not overlapping the box temp
                // lifetime and (perf) stretching the temp lifetime across the inlinee
                // body.
                //
                // Since this is an inline candidate, we must be optimizing, and so we have
                // a unique box temp per call. So no worries about overlap.
                //
                assert(!opts.OptimizationDisabled());

                // Lifetime stretching could be addressed with some extra cleverness--sinking
                // the allocation back down to just before the copy, once we figure out
                // where the copy is. We defer for now.
                //
                Statement* insertBeforeStmt = cursor;
                noway_assert(insertBeforeStmt != nullptr);
                while (true)
                {
                    if (insertBeforeStmt->GetRootNode() == call)
                    {
                        break;
                    }

                    // If we've searched all the statements in the block and failed to
                    // find the call, then something's wrong.
                    //
                    noway_assert(insertBeforeStmt != impStmtList);

                    insertBeforeStmt = insertBeforeStmt->GetPrevStmt();
                }

                // Found the call. Move the statements comprising the assignment.
                //
                JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(),
                        asgStmt->GetID(), insertBeforeStmt->GetID());
                assert(asgStmt == impLastStmt);
                do
                {
                    Statement* movingStmt = impExtractLastStmt();
                    impInsertStmtBefore(movingStmt, insertBeforeStmt);
                    insertBeforeStmt = movingStmt;
                } while (impLastStmt != cursor);
            }
        }

        // Create a pointer to the box payload in op1.
        //
        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
        op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
        op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);

        // Copy from the exprToBox to the box payload.
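        //
        // (Roughly, the code below produces one of two shapes -- an
        // illustrative sketch, with 8 standing in for TARGET_POINTER_SIZE on
        // a 64-bit target:
        //     impAssignStructPtr(boxTemp + 8, exprToBox, operCls)    // struct case
        //     ASG(IND<type>(ADD(boxTemp, 8)), exprToBox)             // primitive case
        // )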
        //
        if (varTypeIsStruct(exprToBox))
        {
            assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
            op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
        }
        else
        {
            var_types lclTyp = exprToBox->TypeGet();
            if (lclTyp == TYP_BYREF)
            {
                lclTyp = TYP_I_IMPL;
            }
            CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
            if (impIsPrimitive(jitType))
            {
                lclTyp = JITtype2varType(jitType);
            }

            var_types srcTyp = exprToBox->TypeGet();
            var_types dstTyp = lclTyp;

            // We allow float <-> double mismatches and implicit truncation for small types.
            assert((genActualType(srcTyp) == genActualType(dstTyp)) ||
                   (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp)));

            // Note regarding small types.
            // We are going to store to the box here via an indirection, so the cast added below is
            // redundant, since the store has an implicit truncation semantic. The reason we still
            // add this cast is so that the code which deals with GT_BOX optimizations does not have
            // to account for this implicit truncation (e.g. understand that BOX<byte>(0xFF + 1) is
            // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities).
            if (srcTyp != dstTyp)
            {
                exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp);
            }

            op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox);
        }

        // Spill eval stack to flush out any pending side effects.
        impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));

        // Set up this copy as a second assignment.
        Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);

        // Record that this is a "box" node and keep track of the matching parts.
        op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);

        // If it is a value class, mark the "box" node.  We can use this information
        // to optimize several cases:
        //    "box(x) == null" --> false
        //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
        //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"

        op1->gtFlags |= GTF_BOX_VALUE;
        assert(op1->IsBoxedValue());
        assert(asg->gtOper == GT_ASG);
    }
    else
    {
        // Don't optimize, just call the helper and be done with it.
        JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
        assert(operCls != nullptr);

        // Ensure that the value class is restored
        op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */);
        if (op2 == nullptr)
        {
            // We must be backing out of an inline.
            assert(compDonotInline());
            return;
        }

        GenTreeCall::Use* args =
            gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
        op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
    }

    /* Push the result back on the stack, */
    /* even if clsHnd is a value class we want the TI_REF */
    typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
    impPushOnStack(op1, tiRetVal);
}

//------------------------------------------------------------------------
// impImportNewObjArray: Build and import `new` of multi-dimensional array
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//    pCallInfo - The CORINFO_CALL_INFO that has been initialized
//                by a call to CEEInfo::getCallInfo().
//
// Assumptions:
//    The multi-dimensional array constructor arguments (array dimensions) are
//    pushed on the IL stack on entry to this method.
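//
//    For example (an illustrative IL sequence for 'new int[2,3]'):
//        ldc.i4.2
//        ldc.i4.3
//        newobj instance void int32[0...,0...]::.ctor(int32, int32)
//    The two dimension arguments are what this method pops below.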
//
// Notes:
//    Multi-dimensional array constructors are imported as calls to a JIT
//    helper, not as regular calls.

void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
    if (classHandle == nullptr)
    { // compDonotInline()
        return;
    }

    assert(pCallInfo->sig.numArgs);

    GenTree* node;

    // Reuse the temp used to pass the array dimensions to avoid bloating
    // the stack frame in case there are multiple calls to multi-dim array
    // constructors within a single method.
    if (lvaNewObjArrayArgs == BAD_VAR_NUM)
    {
        lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
        lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
        lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
    }

    // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
    // for our call to CORINFO_HELP_NEW_MDARR.
    lvaTable[lvaNewObjArrayArgs].lvExactSize =
        max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));

    // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
    // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
    // to one allocation at a time.
    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));

    //
    // The arguments of the CORINFO_HELP_NEW_MDARR helper are:
    //  - Array class handle
    //  - Number of dimension arguments
    //  - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
    //

    node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
    node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);

    // Pop dimension arguments from the stack one at a time and store them
    // into the lvaNewObjArrayArgs temp.
    for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
    {
        GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);

        GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
        dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
        dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
        dest = gtNewOperNode(GT_IND, TYP_INT, dest);

        node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
    }

    GenTreeCall::Use* args = gtNewCallArgs(node);

    // pass number of arguments to the helper
    args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args);

    args = gtPrependNewCallArg(classHandle, args);

    node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);

    for (GenTreeCall::Use& use : node->AsCall()->Args())
    {
        node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
    }

    node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;

    // Remember that this basic block contains 'new' of a md array
    compCurBB->bbFlags |= BBF_HAS_NEWARRAY;

    impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}

GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
                                    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                    CORINFO_THIS_TRANSFORM  transform)
{
    switch (transform)
    {
        case CORINFO_DEREF_THIS:
        {
            GenTree* obj = thisPtr;

            // This does a LDIND on the obj, which should be a byref pointing to a ref
            impBashVarAddrsToI(obj);
            assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
            CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);

            obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
            // ldind could point anywhere, for example a boxed class static int
            obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);

            return obj;
        }

        case CORINFO_BOX_THIS:
        {
            // Constraint calls where there might be no
            // unboxed entry point require us to implement the call via a helper.
            // These only occur when a possible target of the call
            // may have inherited an implementation of an interface
            // method from System.Object or System.ValueType.  The EE does not provide us with
            // "unboxed" versions of these methods.

            GenTree* obj = thisPtr;

            assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
            obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
            obj->gtFlags |= GTF_EXCEPT;

            CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
            if (impIsPrimitive(jitTyp))
            {
                if (obj->OperIsBlk())
                {
                    obj->ChangeOperUnchecked(GT_IND);

                    // Obj could point anywhere, for example a boxed class static int
                    obj->gtFlags |= GTF_IND_TGTANYWHERE;
                    obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers
                }

                obj->gtType = JITtype2varType(jitTyp);
                assert(varTypeIsArithmetic(obj->gtType));
            }

            // This pushes on the dereferenced byref
            // This is then used immediately to box.
            impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());

            // This pops off the byref-to-a-value-type remaining on the stack and
            // replaces it with a boxed object.
            // This is then used as the object to the virtual call immediately below.
            impImportAndPushBox(pConstrainedResolvedToken);
            if (compDonotInline())
            {
                return nullptr;
            }

            obj = impPopStack().val;
            return obj;
        }
        case CORINFO_NO_THIS_TRANSFORM:
        default:
            return thisPtr;
    }
}

//------------------------------------------------------------------------
// impCanPInvokeInline: check whether PInvoke inlining should be enabled in current method.
//
// Return Value:
//    true if PInvoke inlining should be enabled in current method, false otherwise
//
// Notes:
//    Checks a number of ambient conditions where we could pinvoke but choose not to

bool Compiler::impCanPInvokeInline()
{
    return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
           (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
        ;
}

//------------------------------------------------------------------------
// impCanPInvokeInlineCallSite: basic legality checks using information
// from a call to see if the call qualifies as an inline pinvoke.
//
// Arguments:
//    block      - block containing the call, or for inlinees, block
//                 containing the call being inlined
//
// Return Value:
//    true if this call can legally qualify as an inline pinvoke, false otherwise
//
// Notes:
//    For runtimes that support exception handling interop there are
//    restrictions on using inline pinvoke in handler regions.
//
//    * We have to disable pinvoke inlining inside of filters because
//      in case the main execution (i.e. in the try block) is inside
//      unmanaged code, we cannot reuse the inlined stub (we still need
//      the original state until we are in the catch handler)
//
//    * We disable pinvoke inlining inside handlers since the GSCookie
//      is in the inlined Frame (see
//      CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
//      this would not protect framelets/return-address of handlers.
//
//    These restrictions are currently also in place for CoreCLR but
//    can be relaxed when coreclr/#8459 is addressed.
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
    if (block->hasHndIndex())
    {
        return false;
    }

    // The remaining limitations do not apply to CoreRT
    if (IsTargetAbi(CORINFO_CORERT_ABI))
    {
        return true;
    }

#ifdef TARGET_64BIT
    // On 64-bit platforms, we disable pinvoke inlining inside of try regions.
    // Note that this could be needed on other architectures too, but we
    // haven't done enough investigation to know for sure at this point.
    //
    // Here is the comment from JIT64 explaining why:
    //   [VSWhidbey: 611015] - because the jitted code links in the
    //   Frame (instead of the stub) we rely on the Frame not being
    //   'active' until inside the stub.  This normally happens by the
    //   stub setting the return address pointer in the Frame object
    //   inside the stub.  On a normal return, the return address
    //   pointer is zeroed out so the Frame can be safely re-used, but
    //   if an exception occurs, nobody zeros out the return address
    //   pointer.  Thus if we re-used the Frame object, it would go
    //   'active' as soon as we link it into the Frame chain.
    //
    //   Technically we only need to disable PInvoke inlining if we're
    //   in a handler or if we're in a try body with a catch or
    //   filter/except where other non-handler code in this method
    //   might run and try to re-use the dirty Frame object.
    //
    //   A desktop test case where this seems to matter is
    //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
    if (block->hasTryIndex())
    {
        // This does not apply to the raw pinvoke call that is inside the pinvoke
        // ILStub. In this case, we have to inline the raw pinvoke call into the stub,
        // otherwise we would end up with a stub that recursively calls itself, and end
        // up with a stack overflow.
        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
        {
            return true;
        }

        return false;
    }
#endif // TARGET_64BIT

    return true;
}

//------------------------------------------------------------------------
// impCheckForPInvokeCall: examines a call to see if it is a pinvoke and,
// if so, whether it can be expressed as an inline pinvoke.
//
// Arguments:
//    call - tree for the call
//    methHnd - handle for the method being called (may be null)
//    sig - signature of the method being called
//    mflags - method flags for the method being called
//    block - block containing the call, or for inlinees, block
//            containing the call being inlined
//
// Notes:
//   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
//
//   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
//   call passes a combination of legality and profitability checks.
//
//   If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition

void Compiler::impCheckForPInvokeCall(
    GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
{
    CorInfoCallConvExtension unmanagedCallConv;

    // If VM flagged it as Pinvoke, flag the call node accordingly
    if ((mflags & CORINFO_FLG_PINVOKE) != 0)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
    }

    bool suppressGCTransition = false;
    if (methHnd)
    {
        if ((mflags & CORINFO_FLG_PINVOKE) == 0)
        {
            return;
        }

        unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition);
    }
    else
    {
        if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG)
        {
            return;
        }

        unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition);

        assert(!call->gtCallCookie);
    }

    if (suppressGCTransition)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION;
    }

    // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT,
    // return here without inlining the native call.
    if (unmanagedCallConv == CorInfoCallConvExtension::Managed ||
        unmanagedCallConv == CorInfoCallConvExtension::Fastcall ||
        unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction)
    {
        return;
    }
    optNativeCallCount++;

    if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
    {
        // PInvoke in CoreRT ABI must always be inlined. Non-inlineable CALLI cases have been
        // converted to regular method calls earlier using convertPInvokeCalliToCall.

        // PInvoke CALLI in IL stubs must be inlined
    }
    else
    {
        // Check legality
        if (!impCanPInvokeInlineCallSite(block))
        {
            return;
        }

        // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
        // inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
        if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
        {
            if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
            {
                // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite
                // recursive calls to the stub.
            }
            else
            {
                if (!impCanPInvokeInline())
                {
                    return;
                }

                // Size-speed tradeoff: don't use inline pinvoke at rarely
                // executed call sites.  The non-inline version is more
                // compact.
                if (block->isRunRarely())
                {
                    return;
                }
            }
        }

        // The expensive check should be last
        if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
        {
            return;
        }
    }

    JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName));

    call->gtFlags |= GTF_CALL_UNMANAGED;
    call->unmgdCallConv = unmanagedCallConv;
    if (!call->IsSuppressGCTransition())
    {
        info.compUnmanagedCallCountWithGCTransition++;
    }

    // AMD64 convention is the same for native and managed
    if (unmanagedCallConv == CorInfoCallConvExtension::C ||
        unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction)
    {
        call->gtFlags |= GTF_CALL_POP_ARGS;
    }

    if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall)
    {
        call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
    }
}

GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di)
{
    var_types callRetTyp = JITtype2varType(sig->retType);

    /* The function pointer is on top of the stack - It may be a
     * complex expression. As it is evaluated after the args,
     * it may cause registered args to be spilled. Simply spill it.
     */

    // Ignore this trivial case.
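    // (Illustrative: if the IL was "ldloc.0; calli ...", the function pointer
    // is already a plain GT_LCL_VAR and evaluating it cannot disturb the
    // arguments, so no spill is required.)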
    if (impStackTop().val->gtOper != GT_LCL_VAR)
    {
        impSpillStackEntry(verCurrentState.esStackDepth - 1,
                           BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
    }

    /* Get the function pointer */

    GenTree* fptr = impPopStack().val;

    // The function pointer is typically sized to match the target pointer size
    // However, stubgen IL optimization can change LDC.I8 to LDC.I4
    // See ILCodeStream::LowerOpcode
    assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);

#ifdef DEBUG
    // This temporary must never be converted to a double in stress mode,
    // because that can introduce a call to the cast helper after the
    // arguments have already been evaluated.

    if (fptr->OperGet() == GT_LCL_VAR)
    {
        lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1;
    }
#endif

    /* Create the call node */

    GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di);

    call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
#ifdef UNIX_X86_ABI
    call->gtFlags &= ~GTF_CALL_POP_ARGS;
#endif

    return call;
}

/*****************************************************************************/

void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
{
    assert(call->gtFlags & GTF_CALL_UNMANAGED);

    /* Since we push the arguments in reverse order (i.e. right -> left)
     * spill any side effects from the stack
     *
     * OBS: If there is only one side effect we do not need to spill it
     *      thus we have to spill all side-effects except the last one
     */

    unsigned lastLevelWithSideEffects = UINT_MAX;

    unsigned argsToReverse = sig->numArgs;

    // For "thiscall", the first argument goes in a register. Since its
    // order does not need to be changed, we do not need to spill it

    if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
    {
        assert(argsToReverse);
        argsToReverse--;
    }

#ifndef TARGET_X86
    // Don't reverse args on ARM or x64 - first four args always placed in regs in order
    argsToReverse = 0;
#endif

    for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
    {
        if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
        {
            assert(lastLevelWithSideEffects == UINT_MAX);

            impSpillStackEntry(level,
                               BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
        }
        else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
        {
            if (lastLevelWithSideEffects != UINT_MAX)
            {
                /* We had a previous side effect - must spill it */
                impSpillStackEntry(lastLevelWithSideEffects,
                                   BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));

                /* Record the level for the current side effect in case we will spill it */
                lastLevelWithSideEffects = level;
            }
            else
            {
                /* This is the first side effect encountered - record its level */

                lastLevelWithSideEffects = level;
            }
        }
    }

    /* The argument list is now "clean" - no out-of-order side effects
     * Pop the argument list in reverse order */

    GenTreeCall::Use* args     = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse);
    call->AsCall()->gtCallArgs = args;

    if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
    {
        GenTree* thisPtr = args->GetNode();
        impBashVarAddrsToI(thisPtr);
        assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
    }

    for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args))
    {
        GenTree* arg = argUse.GetNode();
        call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT;

        // We should not be passing gc typed args to an unmanaged call.
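        // (Illustrative scenario: passing a byref local to a pinvoke whose
        // signature declares native int. GC info and the signature would then
        // disagree about this arg, which is why the byref is retyped below.)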
        if (varTypeIsGC(arg->TypeGet()))
        {
            // Tolerate byrefs by retyping to native int.
            //
            // This is needed or we'll generate inconsistent GC info
            // for this arg at the call site (gc info says byref,
            // pinvoke sig says native int).
            //
            if (arg->TypeGet() == TYP_BYREF)
            {
                arg->ChangeType(TYP_I_IMPL);
            }
            else
            {
                assert(!"*** invalid IL: gc ref passed to unmanaged call");
            }
        }
    }
}

//------------------------------------------------------------------------
// impInitClass: Build a node to initialize the class before accessing the
//               field if necessary
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//
// Return Value: If needed, a pointer to the node that will perform the class
//               initialization.  Otherwise, nullptr.
//

GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    CorInfoInitClassResult initClassResult =
        info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);

    if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
    {
        return nullptr;
    }
    bool runtimeLookup;

    GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);

    if (node == nullptr)
    {
        assert(compDonotInline());
        return nullptr;
    }

    if (runtimeLookup)
    {
        node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node));
    }
    else
    {
        // Call the shared non-GC static helper, as it's the fastest
        node = fgGetSharedCCtor(pResolvedToken->hClass);
    }

    return node;
}

GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
    GenTree* op1 = nullptr;

#if defined(DEBUG)
    // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it
    // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of
    // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32),
    // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change.
    // Instead, simply fix up the data here for future use.

    // This variable should be the largest size element, with the largest alignment requirement,
    // and the native C++ compiler should guarantee sufficient alignment.
    double aligned_data   = 0.0;
    void*  p_aligned_data = &aligned_data;
    if (info.compMethodSuperPMIIndex != -1)
    {
        switch (lclTyp)
        {
            case TYP_BOOL:
            case TYP_BYTE:
            case TYP_UBYTE:
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool));
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char));
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char));
                // No alignment necessary for byte.
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
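                // (For reference, a sketch of the tree shape the boxed-static path
                // below builds, where the TARGET_POINTER_SIZE offset skips over the
                // boxed object's method table pointer to reach the payload:
                //     IND<lclTyp>(ADD(FIELD<ref>(staticFld), TARGET_POINTER_SIZE))
                // Illustrative only.)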
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
            if (compIsForImportOnly())
            {
                info.compCompHnd->ThrowExceptionForHelper(helperCall);
            }
            else
            {
                impInsertHelperCall(helperCall);
            }
            break;
    }
}

void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
    // Construct the argument list
    GenTreeCall::Use* args = nullptr;
    assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
    for (unsigned i = helperInfo->numArgs; i > 0; --i)
    {
        const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
        GenTree*                  currentArg = nullptr;
        switch (helperArg.argType)
        {
            case CORINFO_HELPER_ARG_TYPE_Field:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass(helperArg.fieldHandle));
                currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Method:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
                currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Class:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
                currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Module:
                currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Const:
                currentArg = gtNewIconNode(helperArg.constant);
                break;
            default:
                NO_WAY("Illegal helper arg type");
        }
        args = gtPrependNewCallArg(currentArg, args);
    }

    /* TODO-Review:
     * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
     * Also, consider sticking this in the first basic block.
     */
    GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
    impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}

//------------------------------------------------------------------------
// impTailCallRetTypeCompatible: Checks whether the return types of caller
//    and callee are compatible so that the callee can be tail called.
//
// Arguments:
//    allowWidening -- whether to allow implicit widening by the callee.
//                     For instance, allowing int32 -> int16 tailcalls.
//                     The managed calling convention allows this, but
//                     we don't want explicit tailcalls to depend on this
//                     detail of the managed calling convention.
//    callerRetType -- the caller's return type
//    callerRetTypeClass - the caller's return struct type
//    callerCallConv -- calling convention of the caller
//    calleeRetType -- the callee's return type
//    calleeRetTypeClass - the callee return struct type
//    calleeCallConv -- calling convention of the callee
//
// Returns:
//    True if the tailcall types are compatible.
//
// Remarks:
//    Note that here we don't check compatibility in IL Verifier sense, but on the
//    lines of return types getting returned in the same return register.
bool Compiler::impTailCallRetTypeCompatible(bool                     allowWidening,
                                            var_types                callerRetType,
                                            CORINFO_CLASS_HANDLE     callerRetTypeClass,
                                            CorInfoCallConvExtension callerCallConv,
                                            var_types                calleeRetType,
                                            CORINFO_CLASS_HANDLE     calleeRetTypeClass,
                                            CorInfoCallConvExtension calleeCallConv)
{
    // Early out if the types are the same.
    if (callerRetType == calleeRetType)
    {
        return true;
    }

    // For integral types the managed calling convention dictates that callee
    // will widen the return value to 4 bytes, so we can allow implicit widening
    // in managed to managed tailcalls when dealing with <= 4 bytes.
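    // (For example, a caller returning int32 may tail call a callee declared
    // to return int16: the managed callee widens its result to 32 bits in the
    // return register, so the caller still observes a valid int32. This is an
    // illustrative case; the precise conditions are checked just below.)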
    bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) &&
                     (calleeCallConv == CorInfoCallConvExtension::Managed);

    if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) &&
        (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType)))
    {
        return true;
    }

    // If the class handles are the same and not null, the return types are compatible.
    if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
    {
        return true;
    }

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    // Jit64 compat:
    if (callerRetType == TYP_VOID)
    {
        // This needs to be allowed to support the following IL pattern that Jit64 allows:
        //     tail.call
        //     pop
        //     ret
        //
        // Note that the above IL pattern is not valid as per IL verification rules.
        // Therefore, only full trust code can take advantage of this pattern.
        return true;
    }

    // These checks return true if the return value type sizes are the same and
    // get returned in the same return register i.e. caller doesn't need to normalize
    // return value. Some of the tail calls permitted by below checks would have
    // been rejected by IL Verifier before we reached here.  Therefore, only full
    // trust code can make those tail calls.
    unsigned callerRetTypeSize = 0;
    unsigned calleeRetTypeSize = 0;
    bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
                                                               true, info.compIsVarArgs, callerCallConv);
    bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
                                                               true, info.compIsVarArgs, calleeCallConv);

    if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
    {
        return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
    }
#endif // TARGET_AMD64 || TARGET_ARM64

    return false;
}

/********************************************************************************
 *
 * Returns true if the current opcode and the opcodes following it correspond
 * to a supported tail call IL pattern.
 *
 */
bool Compiler::impIsTailCallILPattern(
    bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
    // Bail out if the current opcode is not a call.
    if (!impOpcodeIsCallOpcode(curOpcode))
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // If shared ret tail opt is not enabled, we will enable
    // it for recursive methods.
    if (isRecursive)
#endif
    {
        // we can actually handle it if the ret is in a fallthrough block, as long as that is the only part of the
        // sequence. Make sure we don't go past the end of the IL however.
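        //
        // (Illustrative IL for the recursive case this admits:
        //      call instance int32 C::M(...)
        //      ret
        //  where the ret may live in a shared fallthrough return block rather
        //  than textually following the call.)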
        codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
    }

    // Bail out if there is no next opcode after call
    if (codeAddrOfNextOpcode >= codeEnd)
    {
        return false;
    }

    OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);

    return (nextOpcode == CEE_RET);
}

/*****************************************************************************
 *
 * Determine whether the call could be converted to an implicit tail call
 *
 */
bool Compiler::impIsImplicitTailCallCandidate(
    OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
{
#if FEATURE_TAILCALL_OPT
    if (!opts.compTailCallOpt)
    {
        return false;
    }

    if (opts.OptimizationDisabled())
    {
        return false;
    }

    // must not be tail prefixed
    if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // the block containing call is marked as BBJ_RETURN
    // We allow shared ret tail call optimization on recursive calls even under
    // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
    if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
        return false;
#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN

    // must be call+ret or call+pop+ret
    if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
    {
        return false;
    }

    return true;
#else
    return false;
#endif // FEATURE_TAILCALL_OPT
}

//------------------------------------------------------------------------
// impImportCall: import a call-inspiring opcode
//
// Arguments:
//    opcode                    - opcode that inspires the call
//    pResolvedToken            - resolved token for the call target
//    pConstrainedResolvedToken - resolved constraint token (or nullptr)
//    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
//    prefixFlags               - IL prefix flags for the call
//    callInfo                  - EE supplied info for the call
//    rawILOffset               - IL offset of the opcode, used for guarded devirtualization.
//
// Returns:
//    Type of the call's return value.
//    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
//    However we can't assert for this here yet because there are cases we miss. See issue #13272.
//
//
// Notes:
//    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
//
//    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
//    uninitialized object.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
var_types Compiler::impImportCall(OPCODE                  opcode,
                                  CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                  CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                  GenTree*                newobjThis,
                                  int                     prefixFlags,
                                  CORINFO_CALL_INFO*      callInfo,
                                  IL_OFFSET               rawILOffset)
{
    assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);

    // The current statement DI may not refer to the exact call, but for calls
    // we wish to be able to attach the exact IL instruction to get "return
    // value" support in the debugger, so create one with the exact IL offset.
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
        const char* modName;
        const char* className;
        const char* methodName;
        if ((className = eeGetClassName(clsHnd)) != nullptr &&
            strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
            (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
        {
            return impImportJitTestLabelMark(sig->numArgs);
        }
#endif // DEBUG

        // <NICE> Factor this into getCallInfo </NICE>
        bool isSpecialIntrinsic = false;
        if ((mflags & CORINFO_FLG_INTRINSIC) != 0)
        {
            const bool isTailCall = canTailCall && (tailCallFlags != 0);

            call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall,
                                isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni,
                                &isSpecialIntrinsic);

            if (compDonotInline())
            {
                return TYP_UNDEF;
            }

            if (call != nullptr)
            {
#ifdef FEATURE_READYTORUN
                if (call->OperGet() == GT_INTRINSIC)
                {
                    if (opts.IsReadyToRun())
                    {
                        noway_assert(callInfo->kind == CORINFO_CALL);
                        call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup;
                    }
                    else
                    {
                        call->AsIntrinsic()->gtEntryPoint.addr       = nullptr;
                        call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE;
                    }
                }
#endif

                bIntrinsicImported = true;
                goto DONE_CALL;
            }
        }

#ifdef FEATURE_SIMD
        if (supportSIMDTypes())
        {
            call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token);
            if (call != nullptr)
            {
                bIntrinsicImported = true;
                goto DONE_CALL;
            }
        }
#endif // FEATURE_SIMD

        if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
        {
            NO_WAY("Virtual call to a function added via EnC is not supported");
        }

        if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
        {
            BADCODE("Bad calling convention");
        }

        //-------------------------------------------------------------------------
        //  Construct the call node
        //
        // Work out what sort of call we're making.
        // Dispense with virtual calls implemented via LDVIRTFTN immediately.

        constraintCallThisTransform    = callInfo->thisTransform;
        exactContextHnd                = callInfo->contextHandle;
        exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;

        switch (callInfo->kind)
        {
            case CORINFO_VIRTUALCALL_STUB:
            {
                assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
                assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
                if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
                {
                    if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED)
                    {
                        // Runtime does not support inlining of all shapes of runtime lookups
                        // Inlining has to be aborted in such a case
                        compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
                        return TYP_UNDEF;
                    }

                    GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
                    assert(!compDonotInline());

                    // This is the rough code to set up an indirect stub call
                    assert(stubAddr != nullptr);

                    // The stubAddr may be a
                    // complex expression. As it is evaluated after the args,
                    // it may cause registered args to be spilled. Simply spill it.
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorpgCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, We've been told to call via LDVIRTFTN, so just // take the call now.... 
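                // (A sketch of the transformation below, in rough pseudo-IL with
                // illustrative names:
                //      callvirt T::M(args)
                //  becomes
                //      t    = this                // cloned "this" pointer
                //      fptr = ldvirtftn t, T::M   // via impImportLdvirtftn
                //      calli fptr(t, args)        // indirect call through a temp
                // )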
                GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig);

                GenTree* thisPtr = impPopStack().val;
                thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
                assert(thisPtr != nullptr);

                // Clone the (possibly transformed) "this" pointer
                GenTree* thisPtrCopy;
                thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                       nullptr DEBUGARG("LDVIRTFTN this pointer"));

                GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
                assert(fptr != nullptr);

                thisPtr = nullptr; // can't reuse it

                // Now make an indirect call through the function pointer

                unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
                impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
                fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);

                // Create the actual call node

                call                          = gtNewIndCallNode(fptr, callRetTyp, args, di);
                call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy);
                call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);

                if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
                {
                    // CoreRT generic virtual method: need to handle potential fat function pointers
                    addFatPointerCandidate(call->AsCall());
                }
#ifdef FEATURE_READYTORUN
                if (opts.IsReadyToRun())
                {
                    // Null check is needed for ready to run to handle
                    // non-virtual <-> virtual changes between versions
                    call->gtFlags |= GTF_CALL_NULLCHECK;
                }
#endif

                // Since we are jumping over some code, check that it's OK to skip that code
                assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
                       (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
                goto DONE;
            }

            case CORINFO_CALL:
            {
                // This is for a non-virtual, non-interface etc. call
                call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);

                // We remove the nullcheck for the GetType call intrinsic.
                // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
                // and intrinsics.
                if (callInfo->nullInstanceCheck &&
                    !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType)))
                {
                    call->gtFlags |= GTF_CALL_NULLCHECK;
                }

#ifdef FEATURE_READYTORUN
                if (opts.IsReadyToRun())
                {
                    call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup);
                }
#endif
                break;
            }

            case CORINFO_CALL_CODE_POINTER:
            {
                // The EE has asked us to call by computing a code pointer and then doing an
                // indirect call. This is because a runtime lookup is required to get the code entry point.

                // These calls always follow a uniform calling convention, i.e.
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
        */
        CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_X86
        if (canTailCall)
        {
            canTailCall             = false;
            szCanTailCallFailReason = "Callee is varargs";
        }
#endif

        /* Get the total number of arguments - this is already correct
         * for CALLI - for methods we have to get it from the call site */

        if (opcode != CEE_CALLI)
        {
#ifdef DEBUG
            unsigned numArgsDef = sig->numArgs;
#endif
            eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);

            // For vararg calls we must be sure to load the return type of the
            // method actually being called, as well as the return types
            // specified in the vararg signature. With type equivalency, these types
            // may not be the same.
            if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
            {
                if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
                    sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
                    sig->retType != CORINFO_TYPE_VAR)
                {
                    // Make sure that all valuetypes (including enums) that we push are loaded.
                    // This is to guarantee that if a GC is triggered from the prestub of this method,
                    // all valuetypes in the method signature are already loaded.
                    // We need to be able to find the size of the valuetypes, but we cannot
                    // do a class-load from within GC.
                    info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
                }
            }

            assert(numArgsDef <= sig->numArgs);
        }

        /* We will have "cookie" as the last argument but we cannot push
         * it on the operand stack because we may overflow, so we append it
         * to the arg list next after we pop them */
    }

    //--------------------------- Inline NDirect ------------------------------

    // For inline cases we technically should look at both the current
    // block and the call site block (or just the latter if we've
    // fused the EH trees). However the block-related checks pertain to
    // EH and we currently won't inline a method with EH. So for
    // inlinees, just checking the call site block is sufficient.
    {
        // New lexical block here to avoid compilation errors because of GOTOs.
        BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
        impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
    }

#ifdef UNIX_X86_ABI
    // On Unix x86 we use caller-cleaned convention.
    if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0)
        call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI

    if (call->gtFlags & GTF_CALL_UNMANAGED)
    {
        // We set up the unmanaged call by linking the frame, disabling GC, etc
        // This needs to be cleaned up on return.
        // In addition, native calls have different normalization rules than managed code
        // (managed calling convention always widens return values in the callee)
        if (canTailCall)
        {
            canTailCall             = false;
            szCanTailCallFailReason = "Callee is native";
        }

        checkForSmallType = true;

        impPopArgsForUnmanagedCall(call, sig);

        goto DONE;
    }
    else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) &&
             ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG))
    {
        if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
        {
            // Normally this only happens with inlining.
            // However, a generic method (or type) being NGENd into another module
            // can run into this issue as well. There's not an easy fall-back for NGEN,
            // so instead we fall back to JIT.
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
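    // Illustrative example for case (c): a call to G<string>.StaticMethod() executes the
    // single shared G<__Canon> code body, so the caller passes G<string>'s type handle as
    // the hidden instantiation parameter to identify the exact instantiation.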
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
        if (impStackHeight() > 0)
        {
            typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
            if (delegateTypeInfo.IsToken())
            {
                ldftnToken = delegateTypeInfo.GetToken();
            }
        }
    }

    //-------------------------------------------------------------------------
    // The main group of arguments

    args                       = impPopCallArgs(sig->numArgs, sig, extraArg);
    call->AsCall()->gtCallArgs = args;

    for (GenTreeCall::Use& use : call->AsCall()->Args())
    {
        call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
    }

    //-------------------------------------------------------------------------
    // The "this" pointer

    if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) &&
        !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
    {
        GenTree* obj;

        if (opcode == CEE_NEWOBJ)
        {
            obj = newobjThis;
        }
        else
        {
            obj = impPopStack().val;
            obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
            if (compDonotInline())
            {
                return TYP_UNDEF;
            }
        }

        // Store the "this" value in the call
        call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
        call->AsCall()->gtCallThisArg = gtNewCallArgs(obj);

        // Is this a virtual or interface call?
        if (call->AsCall()->IsVirtual())
        {
            // only true object pointers can be virtual
            assert(obj->gtType == TYP_REF);

            // See if we can devirtualize.

            const bool isExplicitTailCall     = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
            const bool isLateDevirtualization = false;
            impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags,
                                &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall,
                                // Take care to pass raw IL offset here as the 'debug info' might be different for
                                // inlinees.
                                rawILOffset);

            // Devirtualization may change which method gets invoked. Update our local cache.
            //
            methHnd = callInfo->hMethod;
        }

        if (impIsThis(obj))
        {
            call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
        }
    }

    //-------------------------------------------------------------------------
    // The "this" pointer for "newobj"

    if (opcode == CEE_NEWOBJ)
    {
        if (clsFlags & CORINFO_FLG_VAROBJSIZE)
        {
            assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately

            // This is a 'new' of a variable sized object, where
            // the constructor is to return the object. In this case
            // the constructor claims to return VOID but we know it
            // actually returns the new object
            assert(callRetTyp == TYP_VOID);
            callRetTyp   = TYP_REF;
            call->gtType = TYP_REF;
            impSpillSpecialSideEff();

            impPushOnStack(call, typeInfo(TI_REF, clsHnd));
        }
        else
        {
            if (clsFlags & CORINFO_FLG_DELEGATE)
            {
                // The new inliner morphs it here in impImportCall.
                // This allows us to inline the call to the delegate constructor.
                call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
            }

            if (!bIntrinsicImported)
            {
#if defined(DEBUG) || defined(INLINE_DATA)

                // Keep track of the raw IL offset of the call
                call->AsCall()->gtRawILOffset = rawILOffset;

#endif // defined(DEBUG) || defined(INLINE_DATA)

                // Is it an inline candidate?
                impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
            }

            // append the call node.
            impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);

            // Now push the value of the 'new' onto the stack

            // This is a 'new' of a non-variable sized object.
            // Append the new node (op1) to the statement list,
            // and then push the local holding the value of this
            // new instruction on the stack.
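            // For example (illustrative), for a value class 'newobj MyStruct::.ctor(...)',
            // newobjThis is ADDR(LCL_VAR tmp); after appending the call we push 'tmp' itself,
            // which is what the value-class path below does.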
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
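        // (For a true virtual or indirect call the exact target is unknown at this site,
        // so there is no single callee method handle the runtime could validate against.)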
            CORINFO_METHOD_HANDLE exactCalleeHnd =
                ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd;
            if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall))
            {
                if (isExplicitTailCall)
                {
                    // In case of explicit tail calls, mark it so that it is not considered
                    // for in-lining.
                    call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
                    JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));

                    if (isStressTailCall)
                    {
                        call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL;
                        JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call));
                    }
                }
                else
                {
#if FEATURE_TAILCALL_OPT
                    // Must be an implicit tail call.
                    assert(isImplicitTailCall);

                    // It is possible that a call node is both an inline candidate and marked
                    // for opportunistic tail calling. In-lining happens before morphing of
                    // trees. If in-lining of an in-line candidate gets aborted for whatever
                    // reason, it will survive to the morphing stage at which point it will be
                    // transformed into a tail call after performing additional checks.

                    call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
                    JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));

#else //! FEATURE_TAILCALL_OPT
                    NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
#endif // FEATURE_TAILCALL_OPT
                }

                // This might or might not turn into a tailcall. We do more
                // checks in morph. For explicit tailcalls we need more
                // information in morph in case it turns out to be a
                // helper-based tailcall.
                if (isExplicitTailCall)
                {
                    assert(call->AsCall()->tailCallInfo == nullptr);
                    call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo;
                    switch (opcode)
                    {
                        case CEE_CALLI:
                            call->AsCall()->tailCallInfo->SetCalli(sig);
                            break;
                        case CEE_CALLVIRT:
                            call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken);
                            break;
                        default:
                            call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken);
                            break;
                    }
                }
            }
            else
            {
                // canTailCall reported its reasons already
                canTailCall = false;
                JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call));
            }
        }
        else
        {
            // If this assert fires it means that canTailCall was set to false without setting a reason!
            assert(szCanTailCallFailReason != nullptr);
            JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im",
                    dspTreeID(call), szCanTailCallFailReason);
            info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL,
                                                     szCanTailCallFailReason);
        }
    }

    // Note: we assume that small return types are already normalized by the managed callee
    // or by the pinvoke stub for calls to unmanaged code.

    if (!bIntrinsicImported)
    {
        //
        // Things that need to be checked when bIntrinsicImported is false.
        //

        assert(call->gtOper == GT_CALL);
        assert(callInfo != nullptr);

        if (compIsForInlining() && opcode == CEE_CALLVIRT)
        {
            GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode();

            if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
                impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj,
                                                                   impInlineInfo->inlArgInfo))
            {
                impInlineInfo->thisDereferencedFirst = true;
            }
        }

#if defined(DEBUG) || defined(INLINE_DATA)

        // Keep track of the raw IL offset of the call
        call->AsCall()->gtRawILOffset = rawILOffset;

#endif // defined(DEBUG) || defined(INLINE_DATA)

        // Is it an inline candidate?
        impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
    }

    // Extra checks for tail calls and tail recursion.
    //
    // A tail recursive call is a potential loop from the current block to the start of the root method.
    // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially
    // being in a loop.
    //
    // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too
    // late. Currently this doesn't lead to problems. See GitHub issue 33529.
    //
    // OSR also needs to handle tail calls specially:
    // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after.
    // * the root method entry must be imported if there's a recursive tail call or a potentially
    //   inlineable tail call.
    //
    if ((tailCallFlags != 0) && canTailCall)
    {
        if (gtIsRecursiveCall(methHnd))
        {
            assert(verCurrentState.esStackDepth == 0);
            BasicBlock* loopHead = nullptr;
            if (!compIsForInlining() && opts.IsOSR())
            {
                // For root method OSR we may branch back to the actual method entry,
                // which is not fgFirstBB, and which we will need to import.
                assert(fgEntryBB != nullptr);
                loopHead = fgEntryBB;
            }
            else
            {
                // For normal jitting we may branch back to the firstBB; this
                // should already be imported.
                loopHead = fgFirstBB;
            }

            JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB
                    " as having a backward branch.\n",
                    dspTreeID(call), loopHead->bbNum, compCurBB->bbNum);
            fgMarkBackwardJump(loopHead, compCurBB);
        }

        // We only do these OSR checks in the root method because:
        // * If we fail to import the root method entry when importing the root method, we can't go back
        //   and import it during inlining. So instead of checking just for recursive tail calls we also
        //   have to check for anything that might introduce a recursive tail call.
        // * We only instrument root method blocks in OSR methods.
        //
        if (opts.IsOSR() && !compIsForInlining())
        {
            // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique
            // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile
            // instrumentation.
            //
            if (compCurBB->bbJumpKind != BBJ_RETURN)
            {
                BasicBlock* const successor = compCurBB->GetUniqueSucc();
                assert(successor->bbJumpKind == BBJ_RETURN);
                successor->bbFlags |= BBF_TAILCALL_SUCCESSOR;
                optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR;
            }

            // If this call might eventually turn into a loop back to method entry, make sure we
            // import the method entry.
            //
            assert(call->IsCall());
            GenTreeCall* const actualCall           = call->AsCall();
            const bool         mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() ||
                                              actualCall->IsGuardedDevirtualizationCandidate();

            // Only schedule importation if we're not currently importing.
            //
            if (mustImportEntryBlock && (compCurBB != fgEntryBB))
            {
                JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB
                        " for importation\n",
                        dspTreeID(call), fgEntryBB->bbNum);
                impImportBlockPending(fgEntryBB);
            }
        }
    }

    if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0)
    {
        assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER);
        addFatPointerCandidate(call->AsCall());
    }

DONE_CALL:
    // Push or append the result of the call
    if (callRetTyp == TYP_VOID)
    {
        if (opcode == CEE_NEWOBJ)
        {
            // we actually did push something, so don't spill the thing we just pushed.
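            // (The newobj path pushed the allocated object earlier, so appending the void call
            // at depth - 1 leaves that value on the stack.)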
assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. // // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. 
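                    // (Under the CoreRT ABI a "fat" function pointer is a marked pointer that
                    // refers to a {target, generic context} pair rather than directly to code,
                    // so the calli must stay in this simple statement shape for the later
                    // fat-pointer expansion to rewrite it.)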
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
    //
    // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
    //     callInfoCache.uncacheCallInfo();

    return callRetTyp;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv)
{
    CorInfoType corType = methInfo->args.retType;

    if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
    {
        // We have some kind of STRUCT being returned
        structPassingKind howToReturnStruct = SPK_Unknown;

        var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct);

        if (howToReturnStruct == SPK_ByReference)
        {
            return true;
        }
    }

    return false;
}

#ifdef DEBUG
//
var_types Compiler::impImportJitTestLabelMark(int numArgs)
{
    TestLabelAndNum tlAndN;
    if (numArgs == 2)
    {
        tlAndN.m_num  = 0;
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else if (numArgs == 3)
    {
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_num = val->AsIntConCommon()->IconValue();
        se           = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else
    {
        assert(false);
    }

    StackEntry expSe = impPopStack();
    GenTree*   node  = expSe.val;

    // There are a small number of special cases, where we actually put the annotation on a subnode.
    if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
    {
        // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
        // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
        // offset within the static field block whose address is returned by the helper call.
        // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
        assert(node->OperGet() == GT_IND);
        tlAndN.m_num -= 100;
        GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN);
        GetNodeTestData()->Remove(node);
    }
    else
    {
        GetNodeTestData()->Set(node, tlAndN);
    }

    impPushOnStack(node, expSe.seTypeInfo);
    return node->TypeGet();
}
#endif // DEBUG

//-----------------------------------------------------------------------------------
// impFixupCallStructReturn: For a call node that returns a struct do one of the following:
// - set the flag to indicate struct return via retbuf arg;
// - adjust the return type to a SIMD type if it is returned in 1 reg;
// - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate.
//
// Arguments:
//    call       - GT_CALL GenTree node
//    retClsHnd  - Class handle of return type of the call
//
// Return Value:
//    Returns new GenTree node after fixing struct return of call node
//
GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
{
    if (!varTypeIsStruct(call))
    {
        return call;
    }

    call->gtRetClsHnd = retClsHnd;

#if FEATURE_MULTIREG_RET
    call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv());
    const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
    const unsigned        retRegCount = retTypeDesc->GetReturnRegCount();
#else  // !FEATURE_MULTIREG_RET
    const unsigned retRegCount = 1;
#endif // !FEATURE_MULTIREG_RET

    structPassingKind howToReturnStruct;
    var_types         returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);

    if (howToReturnStruct == SPK_ByReference)
    {
        assert(returnType == TYP_UNKNOWN);
        call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
        return call;
    }

    // Recognize SIMD types as we do for LCL_VARs,
    // note that it may not be the ABI-specific type, for example, on x64 we can set `TYP_SIMD8`
    // for `System.Numerics.Vector2` here but lower will change it to long as the ABI dictates.
    var_types simdReturnType = impNormStructType(call->gtRetClsHnd);
    if (simdReturnType != call->TypeGet())
    {
        assert(varTypeIsSIMD(simdReturnType));
        JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()),
                varTypeName(simdReturnType));
        call->ChangeType(simdReturnType);
    }

    if (retRegCount == 1)
    {
        return call;
    }

#if FEATURE_MULTIREG_RET
    assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs.
    assert(returnType == TYP_STRUCT);
    assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue));

#ifdef UNIX_AMD64_ABI
    // must be a struct returned in two registers
    assert(retRegCount == 2);
#else  // not UNIX_AMD64_ABI
    assert(retRegCount >= 2);
#endif // not UNIX_AMD64_ABI

    if (!call->CanTailCall() && !call->IsInlineCandidate())
    {
        // Force a call returning multi-reg struct to be always of the IR form
        //   tmp = call
        //
        // No need to assign a multi-reg struct to a local var if:
        //  - It is a tail call or
        //  - The call is marked for in-lining later
        return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
    }
    return call;
#endif // FEATURE_MULTIREG_RET
}

/*****************************************************************************
   For struct return values, re-type the operand in the case where the ABI
   does not use a struct return buffer
 */

//------------------------------------------------------------------------
// impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case;
//   in non-multireg case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`.
//
// Arguments:
//    op - the return value;
//    retClsHnd - the struct handle;
//    unmgdCallConv - the calling convention of the function that returns this struct.
//
// Return Value:
//    the result tree that does the return.
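//
// Notes:
//    In the multireg case the returned 'op' is forced to be either GT_RETURN(lclvar) or
//    GT_RETURN(call); any other operand is first assigned to a temp (see the code below).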
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
    //
    // Well now we have to materialize the return buffer as
    // an address-taken temp. Then we can return the temp.
    //
    // NOTE: this code assumes that since the call directly
    // feeds the return, then the call must be returning the
    // same structure/class/type.
    //
    unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));

    // No need to spill anything as we're about to return.
    impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);

    op = gtNewLclvNode(tmpNum, info.compRetType);
    JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
    DISPTREE(op);
    return op;
}

/*****************************************************************************
   CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
   finally-protected try. We find the finally blocks protecting the current
   offset (in order) by walking over the complete exception table and
   finding enclosing clauses. This assumes that the table is sorted.
   This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.

   If we are leaving a catch handler, we need to attach the
   CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.

   After this function, the BBJ_LEAVE block has been converted to a different type.
 */

#if !defined(FEATURE_EH_FUNCLETS)

void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG

    bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned    blkAddr         = block->bbCodeOffs;
    BasicBlock* leaveTarget     = block->bbJumpDest;
    unsigned    jmpAddr         = leaveTarget->bbCodeOffs;

    // LEAVE clears the stack, so spill side effects and set the stack depth to 0

    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;

    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary

    BasicBlock* step         = DUMMY_INIT(NULL);
    unsigned    encFinallies = 0; // Number of enclosing finallies.
    GenTree*    endCatches   = NULL;
    Statement*  endLFinStmt  = NULL; // The statement tree to indicate the end of locally-invoked finally.

    unsigned  XTnum;
    EHblkDsc* HBtab;

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        // Grab the handler offsets

        IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
        IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
        IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
        IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();

        /* Is this a catch-handler we are CEE_LEAVEing out of?
         * If so, we need to call CORINFO_HELP_ENDCATCH.
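         * (Each catch handler being exited needs its own endCatch helper call; these are
         * accumulated below into the 'endCatches' GT_COMMA list.)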
*/ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) BADCODE("leave out of fault/finally block"); // Create the call to CORINFO_HELP_ENDCATCH GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); // Make a list of all the currently pending endCatches if (endCatches) endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); else endCatches = endCatch; #ifdef DEBUG if (verbose) { printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " "CORINFO_HELP_ENDCATCH\n", block->bbNum, XTnum); } #endif } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* This is a finally-protected try we are jumping out of */ /* If there are any pending endCatches, and we have already jumped out of a finally-protected try, then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. Else, just use append to the original block */ BasicBlock* callBlock; assert(!encFinallies == !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " "block %s\n", callBlock->dspToString()); } #endif } else { assert(step != DUMMY_INIT(NULL)); /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->bbJumpKind == BBJ_ALWAYS); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", callBlock->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } // note that this sets BBF_IMPORTED on the block impEndTreeList(callBlock, endLFinStmt, lastStmt); } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n", step->dspToString()); } #endif unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. 
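            // The GT_END_LFIN node created below marks the end of this locally-invoked
            // finally; 'finallyNesting' records the handler nesting depth it belongs to.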
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); endCatches = NULL; encFinallies++; invalidatePreds = true; } } /* Append any remaining endCatches, if any */ assert(!encFinallies == !endLFinStmt); if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " "block %s\n", block->dspToString()); } #endif } else { // If leaveTarget is the start of another try block, we want to make sure that // we do not insert finalStep into that try block. Hence, we find the enclosing // try block. unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; step->bbJumpDest = finalStep; /* The new block will inherit this block's weight */ finalStep->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, finalStep->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } impEndTreeList(finalStep, endLFinStmt, lastStmt); finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE // Queue up the jump target for importing impImportBlockPending(leaveTarget); invalidatePreds = true; } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #else // FEATURE_EH_FUNCLETS void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum, block->bbJumpDest->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. 
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
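                // Resulting flow, illustratively:
                //   block (BBJ_ALWAYS, was the BBJ_LEAVE) -> callBlock (BBJ_CALLFINALLY, in the
                //   enclosing EH region) -> finally handler -> step block created further below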
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the
    // only predecessor are also considered orphans and attempted to be deleted.
    //
    // try {
    //    ....
    //    try
    //    {
    //        ....
    //        leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
    //    } finally { }
    // } finally { }
    // OUTSIDE:
    //
    // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
    // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
    // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
    // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
    // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
    // will be treated as a pair and handled correctly.
    if (block->bbJumpKind == BBJ_CALLFINALLY)
    {
        BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
        dupBlock->bbFlags    = block->bbFlags;
        dupBlock->bbJumpDest = block->bbJumpDest;
        dupBlock->copyEHRegion(block);
        dupBlock->bbCatchTyp = block->bbCatchTyp;

        // Mark this block as
        //  a) not referenced by any other block to make sure that it gets deleted
        //  b) weight zero
        //  c) prevent from being imported
        //  d) as internal
        //  e) as rarely run
        dupBlock->bbRefs   = 0;
        dupBlock->bbWeight = BB_ZERO_WEIGHT;
        dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;

        // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
        // will be next to each other.
        fgInsertBBafter(block, dupBlock);

#ifdef DEBUG
        if (verbose)
        {
            printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
        }
#endif
    }
#endif // FEATURE_EH_FUNCLETS

    block->bbJumpKind = BBJ_LEAVE;
    fgInitBBLookup();
    block->bbJumpDest = fgLookupBB(jmpAddr);

    // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
    // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
    // reason we don't want to remove the block at this point is that if we call
    // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
    // added and the linked list length will be different than fgBBcount.
}

/*****************************************************************************/
// Get the first non-prefix opcode. Used for verification of valid combinations
// of prefixes and actual opcodes.

OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
{
    while (codeAddr < codeEndp)
    {
        OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
        codeAddr += sizeof(__int8);

        if (opcode == CEE_PREFIX1)
        {
            if (codeAddr >= codeEndp)
            {
                break;
            }
            opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
            codeAddr += sizeof(__int8);
        }

        switch (opcode)
        {
            case CEE_UNALIGNED:
            case CEE_VOLATILE:
            case CEE_TAILCALL:
            case CEE_CONSTRAINED:
            case CEE_READONLY:
                break;
            default:
                return opcode;
        }

        codeAddr += opcodeSizes[opcode];
    }

    return CEE_ILLEGAL;
}

/*****************************************************************************/
// Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes

void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
{
    OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);

    if (!(
            // Opcodes of all ldind and stind instructions happen to be contiguous, except stind.i.
            ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
            (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
            (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
            // volatile. prefix is allowed with the ldsfld and stsfld
            (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
    {
        BADCODE("Invalid opcode for unaligned. or volatile. prefix");
    }
}

/*****************************************************************************/

#ifdef DEBUG

#undef RETURN // undef contracts RETURN macro

enum controlFlow_t
{
    NEXT,
    CALL,
    RETURN,
    THROW,
    BRANCH,
    COND_BRANCH,
    BREAK,
    PHI,
    META,
};

const static controlFlow_t controlFlow[] = {
#define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
#include "opcode.def"
#undef OPDEF
};

#endif // DEBUG

/*****************************************************************************
 *  Determine the result type of an arithmetic operation
 *  On 64-bit inserts upcasts when native int is mixed with int32
 */
var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
{
    var_types type = TYP_UNDEF;
    GenTree*  op1  = *pOp1;
    GenTree*  op2  = *pOp2;

    // Arithmetic operations are generally only allowed with
    // primitive types, but certain operations are allowed
    // with byrefs

    if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // byref1-byref2 => gives a native int
            type = TYP_I_IMPL;
        }
        else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // [native] int - byref => gives a native int
            //
            // The reason is that it is possible, in managed C++,
            // to have a tree like this:
            //
            //              -
            //             / \.
            //            /   \.
            //           /     \.
            //          /       \.
            // const(h) int     addr byref
            //
            // <BUGNUM> VSW 318822 </BUGNUM>
            //
            // So here we decide to make the resulting type to be a native int.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT

            type = TYP_I_IMPL;
        }
        else
        {
            // byref - [native] int => gives a byref
            assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));

#ifdef TARGET_64BIT
            if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
            {
                // insert an explicit upcast
                op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT

            type = TYP_BYREF;
        }
    }
    else if ((oper == GT_ADD) &&
             (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        // byref + [native] int => gives a byref
        // (or)
        // [native] int + byref => gives a byref

        // only one can be a byref : byref op byref not allowed
        assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
        assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));

#ifdef TARGET_64BIT
        if (genActualType(op2->TypeGet()) == TYP_BYREF)
        {
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
        }
        else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ?
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have
            // an exact type (that is, fromClass is not a subtype)
            // and we're not going to throw on failure.
            if (isExact && !isCastClass)
            {
                JITDUMP("Cast will fail, optimizing to return null\n");
                GenTree* result = gtNewIconNode(0, TYP_REF);

                // If the cast was fed by a box, we can remove that too.
                if (op1->IsBoxedValue())
                {
                    JITDUMP("Also removing upstream box\n");
                    gtTryRemoveBoxUpstreamEffects(op1);
                }

                return result;
            }
            else if (isExact)
            {
                JITDUMP("Not optimizing failing castclass (yet)\n");
            }
            else
            {
                JITDUMP("Can't optimize since fromClass is inexact\n");
            }
        }
        else
        {
            JITDUMP("Result of cast unknown, must generate runtime test\n");
        }
    }
    else
    {
        JITDUMP("\nCan't optimize since fromClass is unknown\n");
    }

    return nullptr;
}

//------------------------------------------------------------------------
// impCastClassOrIsInstToTree: build and import castclass/isinst
//
// Arguments:
//   op1 - value to cast
//   op2 - type handle for type to cast to
//   pResolvedToken - resolved token from the cast operation
//   isCastClass - true if this is castclass, false means isinst
//
// Return Value:
//   Tree representing the cast
//
// Notes:
//   May expand into a series of runtime checks or a helper call.

GenTree* Compiler::impCastClassOrIsInstToTree(
    GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset)
{
    assert(op1->TypeGet() == TYP_REF);

    // Optimistically assume the jit should expand this as an inline test
    bool shouldExpandInline = true;

    // Profitability check.
    //
    // Don't bother with inline expansion when jit is trying to
    // generate code quickly, or the cast is in code that won't run very
    // often, or the method already is pretty big.
    if (compCurBB->isRunRarely() || opts.OptimizationDisabled())
    {
        // not worth the code expansion if jitting fast or in a rarely run block
        shouldExpandInline = false;
    }
    else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
    {
        // not worth creating an untracked local variable
        shouldExpandInline = false;
    }
    else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1))
    {
        // Optimizations are enabled but we're still instrumenting (including casts)
        if (isCastClass && !impIsClassExact(pResolvedToken->hClass))
        {
            // Usually, we make a speculative assumption that it makes sense to expand castclass
            // even for non-sealed classes, but let's rely on PGO in this specific case
            shouldExpandInline = false;
        }
    }

    // Pessimistically assume the jit cannot expand this as an inline test
    bool                  canExpandInline = false;
    const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);

    // Legality check.
    //
    // Not all castclass/isinst operations can be inline expanded.
    // Check legality only if an inline expansion is desirable.
    if (shouldExpandInline)
    {
        if (isCastClass)
        {
            // Jit can only inline expand the normal CHKCASTCLASS helper.
            canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
        }
        else
        {
            if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
            {
                // If the class is exact, the jit can expand the IsInst check inline.
                canExpandInline = impIsClassExact(pResolvedToken->hClass);
            }
        }
    }

    const bool expandInline = canExpandInline && shouldExpandInline;

    if (!expandInline)
    {
        JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
                canExpandInline ?
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized, as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // We only need to add patchpoints if the method can loop. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // By default we use the "adaptive" strategy. // // This can create both source and target patchpoints within a given // loop structure, which isn't ideal, but is not incorrect. We will // just have some extra Tier0 overhead. 
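                // (Illustration: a loop whose head block is a backedge target and whose
                // bottom block is also a backedge source may end up with patchpoints at
                // both places under this strategy; that is redundant but still correct.)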
// // Todo: implement support for mid-block patchpoints. If `block` // is truly a backedge source (and not in a handler) then we should be // able to find a stack empty point somewhere in the block. // const int patchpointStrategy = JitConfig.TC_PatchpointStrategy(); bool addPatchpoint = false; bool mustUseTargetPatchpoint = false; switch (patchpointStrategy) { default: { // Patchpoints at backedge sources, if possible, otherwise targets. // addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE); mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); break; } case 1: { // Patchpoints at stackempty backedge targets. // Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint // them. // // We should not have allowed OSR if there were backedges in handlers. // assert(!block->hasHndIndex()); addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) && (verCurrentState.esStackDepth == 0); break; } case 2: { // Adaptive strategy. // // Patchpoints at backedge targets if there are multiple backedges, // otherwise at backedge sources, if possible. Note a block can be both; if so we // just need one patchpoint. // if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) { // We don't know backedge count, so just use ref count. // addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0); } if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE)) { addPatchpoint = true; mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); // Also force target patchpoint if target block has multiple (backedge) preds. // if (!mustUseTargetPatchpoint) { for (BasicBlock* const succBlock : block->Succs(this)) { if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1)) { mustUseTargetPatchpoint = true; break; } } } } break; } } if (addPatchpoint) { if (mustUseTargetPatchpoint) { // We wanted a source patchpoint, but could not have one. // So, add patchpoints to the backedge targets. // for (BasicBlock* const succBlock : block->Succs(this)) { if (succBlock->bbNum <= block->bbNum) { // The succBlock had better agree it's a target. // assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET); // We may already have decided to put a patchpoint in succBlock. If not, add one. // if ((succBlock->bbFlags & BBF_PATCHPOINT) != 0) { // In some cases the target may not be stack-empty at entry. // If so, we will bypass patchpoints for this backedge. // if (succBlock->bbStackDepthOnEntry() > 0) { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB " as it has non-empty stack on entry.\n", block->bbNum, succBlock->bbNum); } else { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB " instead\n", block->bbNum, succBlock->bbNum); assert(!succBlock->hasHndIndex()); succBlock->bbFlags |= BBF_PATCHPOINT; } } } } } else { assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; } setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches. // So if !compHasBackwardsBranch, these flags should never be set. // assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. 
// // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
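            // (For illustration: in shared generic code, e.g. a method on a generic type
            // compiled for a __Canon instantiation, the hidden context argument may be
            // consumed only by IL in blocks we are no longer importing, so we
            // conservatively keep it reported as used.)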
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
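                //
                // For example (illustrative IL, not tied to any particular test):
                //
                //   ldloc.0        // pushes a use of struct local V00 onto the stack
                //   ldloca.s 0
                //   initobj  Foo   // the resulting store to V00 must not bypass the live use above
                //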
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
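                    // (Illustration, hypothetical source: for "object o = new Foo();",
                    // where this stloc is the only def of 'o' and the block is entered
                    // with an empty stack, lvaUpdateClass can sharpen 'o' to Foo.)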
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
                 */
                op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);

                // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
                assert((op1->gtFlags & GTF_GLOB_REF) == 0);

                tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
                impPushOnStack(op1, tiRetVal);
                break;

            case CEE_ARGLIST:

                if (!info.compIsVarArgs)
                {
                    BADCODE("arglist in non-vararg method");
                }

                assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);

                /* The ARGLIST cookie is a hidden 'last' parameter, we have already
                   adjusted the arg count because this is like fetching the last param */
                assertImp(0 < numArgs);
                lclNum = lvaVarargsHandleArg;
                op1    = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1));
                op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
                impPushOnStack(op1, tiRetVal);
                break;

            case CEE_ENDFINALLY:

                if (compIsForInlining())
                {
                    assert(!"Shouldn't have exception handlers in the inliner!");
                    compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
                    return;
                }

                if (verCurrentState.esStackDepth > 0)
                {
                    impEvalSideEffects();
                }

                if (info.compXcptnsCount == 0)
                {
                    BADCODE("endfinally outside finally");
                }

                assert(verCurrentState.esStackDepth == 0);

                op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
                goto APPEND;

            case CEE_ENDFILTER:

                if (compIsForInlining())
                {
                    assert(!"Shouldn't have exception handlers in the inliner!");
                    compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
                    return;
                }

                block->bbSetRunRarely(); // filters are rare

                if (info.compXcptnsCount == 0)
                {
                    BADCODE("endfilter outside filter");
                }

                op1 = impPopStack().val;
                assertImp(op1->gtType == TYP_INT);
                if (!bbInFilterILRange(block))
                {
                    BADCODE("EndFilter outside a filter handler");
                }

                /* Mark current bb as end of filter */

                assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
                assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);

                /* Mark catch handler as successor */

                op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
                if (verCurrentState.esStackDepth != 0)
                {
                    verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
                                                DEBUGARG(__LINE__));
                }
                goto APPEND;

            case CEE_RET:
                prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
            RET:
                if (!impReturnInstruction(prefixFlags, opcode))
                {
                    return; // abort
                }
                else
                {
                    break;
                }

            case CEE_JMP:

                assert(!compIsForInlining());

                if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
                {
                    /* CEE_JMP does not make sense in some "protected" regions. */

                    BADCODE("Jmp not allowed in protected region");
                }

                if (opts.IsReversePInvoke())
                {
                    BADCODE("Jmp not allowed in reverse P/Invoke");
                }

                if (verCurrentState.esStackDepth != 0)
                {
                    BADCODE("Stack must be empty after CEE_JMPs");
                }

                _impResolveToken(CORINFO_TOKENKIND_Method);

                JITDUMP(" %08X", resolvedToken.token);

                /* The signature of the target has to be identical to ours.
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
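                // (For example, on 32-bit targets a 64-bit CEE_DIV is later morphed
                // into a call to a helper such as CORINFO_HELP_LDIV, rewriting the
                // node in place as a GT_CALL.)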
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
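/* Sketch of the invariant asserted next: if morph later retypes, say, a
   GT_MOD(long, long) node in place into a helper call node, the original
   allocation must already be at least call-sized; allocating the node as a
   (large) GT_CALL-sized node up front makes that in-place rewrite safe. */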
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
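/* For example (illustrative): an IL sequence such as

       ldc.i4.5
       ldc.i4.3
       cgt

   folds right here to the constant 1, so no compare node survives to
   codegen. */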
op1 = gtFoldExpr(op1);

impPushOnStack(op1, tiRetVal);
break;

case CEE_BEQ_S:
case CEE_BEQ:
    oper = GT_EQ;
    goto CMP_2_OPs_AND_BR;

case CEE_BGE_S:
case CEE_BGE:
    oper = GT_GE;
    goto CMP_2_OPs_AND_BR;

case CEE_BGE_UN_S:
case CEE_BGE_UN:
    oper = GT_GE;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BGT_S:
case CEE_BGT:
    oper = GT_GT;
    goto CMP_2_OPs_AND_BR;

case CEE_BGT_UN_S:
case CEE_BGT_UN:
    oper = GT_GT;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BLE_S:
case CEE_BLE:
    oper = GT_LE;
    goto CMP_2_OPs_AND_BR;

case CEE_BLE_UN_S:
case CEE_BLE_UN:
    oper = GT_LE;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BLT_S:
case CEE_BLT:
    oper = GT_LT;
    goto CMP_2_OPs_AND_BR;

case CEE_BLT_UN_S:
case CEE_BLT_UN:
    oper = GT_LT;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BNE_UN_S:
case CEE_BNE_UN:
    oper = GT_NE;
    goto CMP_2_OPs_AND_BR_UN;

CMP_2_OPs_AND_BR_UN:
    uns       = true;
    unordered = true;
    goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR:
    uns       = false;
    unordered = false;
    goto CMP_2_OPs_AND_BR_ALL;
CMP_2_OPs_AND_BR_ALL:
    /* Pull two values */
    op2 = impPopStack().val;
    op1 = impPopStack().val;

#ifdef TARGET_64BIT
    if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
    {
        op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
    }
    else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
    {
        op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
    }
#endif // TARGET_64BIT

    assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
              (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) ||
              (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));

    if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
    {
        block->bbJumpKind = BBJ_NONE;

        if (op1->gtFlags & GTF_GLOB_EFFECT)
        {
            impSpillSideEffects(false,
                                (unsigned)CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op1 side effect"));
            impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
        }
        if (op2->gtFlags & GTF_GLOB_EFFECT)
        {
            impSpillSideEffects(false,
                                (unsigned)CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op2 side effect"));
            impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
        }

#ifdef DEBUG
        if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
        {
            impNoteLastILoffs();
        }
#endif
        break;
    }

    // We can generate a compare of differently sized floating point op1 and op2;
    // we insert a cast
    //
    if (varTypeIsFloating(op1->TypeGet()))
    {
        if (op1->TypeGet() != op2->TypeGet())
        {
            assert(varTypeIsFloating(op2->TypeGet()));

            // say op1=double, op2=float. To avoid loss of precision
            // while comparing, op2 is converted to double and the double
            // comparison is done.
            if (op1->TypeGet() == TYP_DOUBLE)
            {
                // We insert a cast of op2 to TYP_DOUBLE
                op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
            }
            else if (op2->TypeGet() == TYP_DOUBLE)
            {
                // We insert a cast of op1 to TYP_DOUBLE
                op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
            }
        }
    }

    /* Create and append the operator */

    op1 = gtNewOperNode(oper, TYP_INT, op1, op2);

    if (uns)
    {
        op1->gtFlags |= GTF_UNSIGNED;
    }

    if (unordered)
    {
        op1->gtFlags |= GTF_RELOP_NAN_UN;
    }

    goto COND_JUMP;

case CEE_SWITCH:
    /* Pop the switch value off the stack */
    op1 = impPopStack().val;
    assertImp(genActualTypeIsIntOrI(op1->TypeGet()));

    /* We can create a switch node */
    op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);

    val = (int)getU4LittleEndian(codeAddr);
    codeAddr += 4 + val * 4; // skip over the switch-table

    goto SPILL_APPEND;

/************************** Casting OPCODES ***************************/

case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF;
case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF;
case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF;
case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF;
case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF;

case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF;
case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF;
case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF;
case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF;
case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF;

case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN;
case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN;
case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN;
case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN;
case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN;

case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN;
case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN;
case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN;
case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN;
case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN;

CONV_OVF_UN:
    uns = true;
    goto CONV_OVF_COMMON;
CONV_OVF:
    uns = false;
    goto CONV_OVF_COMMON;

CONV_OVF_COMMON:
    ovfl = true;
    goto _CONV;

case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV;
case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV;
case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV;
case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV;
case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV;

case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV;
case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV;
#if (REGSIZE_BYTES == 8)
case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN;
#else
case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV;
#endif
case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV;
case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN;

case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV;
case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV;

case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN;

CONV_UN:
    uns  = true;
    ovfl = false;
    goto _CONV;

CONV:
    uns  = false;
    ovfl = false;
    goto _CONV;

_CONV:
    // Only conversions from FLOAT or DOUBLE to an integer type, and
    // conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
    if (varTypeIsFloating(lclTyp))
    {
        callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
#ifdef TARGET_64BIT
                   // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
                   // TYP_BYREF could be used as TYP_I_IMPL which is long.
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if 
(varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained);
codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
JITDUMP(" (%08X) ", constrainedResolvedToken.token);
Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
prefixFlags |= PREFIX_CONSTRAINED;

{
    OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
    if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN)
    {
        BADCODE("constrained. has to be followed by callvirt, call or ldftn");
    }
}

goto PREFIX;

case CEE_READONLY:
    JITDUMP(" readonly.");

    Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
    prefixFlags |= PREFIX_READONLY;

    {
        OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
        if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
        {
            BADCODE("readonly. has to be followed by ldelema or call");
        }
    }

    assert(sz == 0);
    goto PREFIX;

case CEE_TAILCALL:
    JITDUMP(" tail.");

    Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
    prefixFlags |= PREFIX_TAILCALL_EXPLICIT;

    {
        OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
        if (!impOpcodeIsCallOpcode(actualOpcode))
        {
            BADCODE("tailcall. has to be followed by call, callvirt or calli");
        }
    }

    assert(sz == 0);
    goto PREFIX;

case CEE_NEWOBJ:

    /* Since we will implicitly insert newObjThisPtr at the start of the
       argument list, spill any GTF_ORDER_SIDEEFF */
    impSpillSpecialSideEff();

    /* NEWOBJ does not respond to TAIL */
    prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;

    /* NEWOBJ does not respond to CONSTRAINED */
    prefixFlags &= ~PREFIX_CONSTRAINED;

    _impResolveToken(CORINFO_TOKENKIND_NewObj);

    eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
                  combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo);

    mflags = callInfo.methodFlags;

    if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
    {
        BADCODE("newobj on static or abstract method");
    }

    // Insert the security callout before any actual code is generated
    impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);

    // There are three different cases for new:
    //   1) Object is an array (arrays are treated specially by the EE)
    //   2) Object is some other variable-sized object (e.g. String)
    //      (in cases 1 and 2 the object size is variable and depends on arguments)
    //   3) Class size can be determined beforehand (the normal case)
    // In the first case we need to call a NEWOBJ helper (multinewarray),
    // in the second case we call the constructor with a '0' this pointer,
    // and in the third case we allocate the memory and then call the constructor.

    clsFlags = callInfo.classFlags;
    if (clsFlags & CORINFO_FLG_ARRAY)
    {
        // Arrays need to call the NEWOBJ helper.
        assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);

        impImportNewObjArray(&resolvedToken, &callInfo);
        if (compDonotInline())
        {
            return;
        }

        callTyp = TYP_REF;
        break;
    }
    // At present this can only be String
    else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
    {
        // Skip this thisPtr argument
        newObjThisPtr = nullptr;

        /* Remember that this basic block contains 'new' of an object */
        block->bbFlags |= BBF_HAS_NEWOBJ;
        optMethodFlags |= OMF_HAS_NEWOBJ;
    }
    else
    {
        // This is the normal case where the size of the object is
        // fixed. Allocate the memory and call the constructor.
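/* Illustrative flow for this path (hypothetical C# "new MyClass()"):
   grab a temp; emit "temp = ALLOCOBJ(MyClass)" (or zero-init a value-class
   temp); pass the temp (or its address) as the 'this' argument when the
   .ctor is imported as an ordinary call below; the temp then carries the
   newly constructed object. This is a rough sketch of the code that
   follows, not additional behavior. */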
        // Note: We cannot add a peep to avoid use of temp here
        // because we don't have enough interference info to detect when
        // sources and destination interfere, example: s = new S(ref);

        // TODO: We should find the correct place to introduce a general
        // reverse copy prop for struct return values from newobj or
        // any function returning structs.

        /* get a temporary for the new object */
        lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
        if (compDonotInline())
        {
            // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
            assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
            return;
        }

        // In the value class case we only need clsHnd for size calcs.
        //
        // The lookup of the code pointer will be handled by CALL in this case
        if (clsFlags & CORINFO_FLG_VALUECLASS)
        {
            if (compIsForInlining())
            {
                // If value class has GC fields, inform the inliner. It may choose to
                // bail out on the inline.
                DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
                if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
                {
                    compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
                    if (compInlineResult->IsFailure())
                    {
                        return;
                    }

                    // Do further notification in the case where the call site is rare;
                    // some policies do not track the relative hotness of call sites for
                    // "always" inline cases.
                    if (impInlineInfo->iciBlock->isRunRarely())
                    {
                        compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
                        if (compInlineResult->IsFailure())
                        {
                            return;
                        }
                    }
                }
            }

            CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);

            if (impIsPrimitive(jitTyp))
            {
                lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
            }
            else
            {
                // The local variable itself is the allocated space.
                // Here we need unsafe value cls check, since the address of struct is taken for further use
                // and potentially exploitable.
                lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
            }

            bool bbInALoop  = impBlockIsInALoop(block);
            bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) &&
                              (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN));
            LclVarDsc* const lclDsc = lvaGetDesc(lclNum);
            if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn))
            {
                // Append a tree to zero-out the temp
                newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet());

                newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
                                               gtNewIconNode(0), // Value
                                               false,            // isVolatile
                                               false);           // not copyBlock
                impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
            }
            else
            {
                JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum);
                lclDsc->lvSuppressedZeroInit = 1;
                compSuppressedZeroInit       = true;
            }

            // Obtain the address of the temp
            newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
        }
        else
        {
            // If we're newing up a finalizable object, spill anything that can cause exceptions.
            //
            bool            hasSideEffects = false;
            CorInfoHelpFunc newHelper =
                info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects);

            if (hasSideEffects)
            {
                JITDUMP("\nSpilling stack for finalizable newobj\n");
                impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill"));
            }

            const bool useParent = true;
            op1                  = gtNewAllocObjNode(&resolvedToken, useParent);
            if (op1 == nullptr)
            {
                return;
            }

            // Remember that this basic block contains 'new' of an object
            block->bbFlags |= BBF_HAS_NEWOBJ;
            optMethodFlags |= OMF_HAS_NEWOBJ;

            // Append the assignment to the temp/local.
            // Don't need to spill
            // at all as we are just calling an EE-Jit helper which can only
            // cause an (async) OutOfMemoryException.

            // We assign the newly allocated object (by a GT_ALLOCOBJ node)
            // to a temp. Note that the pattern "temp = allocObj" is required
            // by the ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
            // without an exhaustive walk over all expressions.

            impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);

            assert(lvaTable[lclNum].lvSingleDef == 0);
            lvaTable[lclNum].lvSingleDef = 1;
            JITDUMP("Marked V%02u as a single def local\n", lclNum);
            lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);

            newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
        }
    }
    goto CALL;

case CEE_CALLI:
    /* CALLI does not respond to CONSTRAINED */
    prefixFlags &= ~PREFIX_CONSTRAINED;
    FALLTHROUGH;

case CEE_CALLVIRT:
case CEE_CALL:

    // We can't call getCallInfo on the token from a CALLI, but we need it in
    // many other places. We unfortunately embed that knowledge here.
    if (opcode != CEE_CALLI)
    {
        _impResolveToken(CORINFO_TOKENKIND_Method);

        eeGetCallInfo(&resolvedToken,
                      (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
                      // this is how impImportCall invokes getCallInfo
                      combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
                              (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE),
                      &callInfo);
    }
    else
    {
        // Suppress uninitialized use warning.
        memset(&resolvedToken, 0, sizeof(resolvedToken));
        memset(&callInfo, 0, sizeof(callInfo));

        resolvedToken.token        = getU4LittleEndian(codeAddr);
        resolvedToken.tokenContext = impTokenLookupContextHandle;
        resolvedToken.tokenScope   = info.compScopeHnd;
    }

CALL: // memberRef should be set.
    // newObjThisPtr should be set for CEE_NEWOBJ

    JITDUMP(" %08X", resolvedToken.token);

    constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;

    bool newBBcreatedForTailcallStress;
    bool passedStressModeValidation;

    newBBcreatedForTailcallStress = false;
    passedStressModeValidation    = true;

    if (compIsForInlining())
    {
        if (compDonotInline())
        {
            return;
        }
        // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
        assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
    }
    else
    {
        if (compTailCallStress())
        {
            // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
            // Tail call stress only recognizes call+ret patterns and forces them to be
            // explicit tail prefixed calls. Also, fgMakeBasicBlocks() under tail call stress
            // doesn't import the 'ret' opcode following the call into the basic block containing
            // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
            // is already checking that there is an opcode following the call, and hence it is
            // safe here to read the next opcode without a bounds check.
            newBBcreatedForTailcallStress =
                impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
                                                 // make it jump to RET.
                (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET

            bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
            if (newBBcreatedForTailcallStress && !hasTailPrefix)
            {
                // Do a more detailed evaluation of legality
                const bool returnFalseIfInvalid = true;
                const bool passedConstraintCheck =
                    verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ?
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
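/* Roughly: under tail call stress, IL like "call Foo ; ret" was split by
   fgMakeBasicBlocks() so that the ret begins its own block; having imported
   the (now tail-prefixed) call, we must branch to that block explicitly
   rather than fall through. This note is an informal restatement of the
   surrounding comments, not additional behavior. */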
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
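/* Illustrative (hypothetical types): for C# like

       struct S { public int X; }
       ... s.X ...

   when the struct value itself sits on the IL stack, the importer first
   materializes the struct's address and builds the FIELD node over that
   address rather than over the value. */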
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
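/* For example (illustrative names and values): given an already-initialized
   class with a hypothetical

       static readonly int Limit = 42;

   an ldsfld of Limit imports as the constant 42 (widened to stack type just
   below) instead of an indirection off the static's address. */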
        //
        op1->gtType = genActualType(lclTyp);

        goto FIELD_DONE;
    }
}

    FALLTHROUGH;

case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
    op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp);
    break;

case CORINFO_FIELD_INTRINSIC_ZERO:
{
    assert(aflags & CORINFO_ACCESS_GET);
    // Widen to stack type
    lclTyp = genActualType(lclTyp);
    op1    = gtNewIconNode(0, lclTyp);
    goto FIELD_DONE;
}
break;

case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
{
    assert(aflags & CORINFO_ACCESS_GET);

    // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0)
    op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr);
    goto FIELD_DONE;
}
break;

case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
{
    assert(aflags & CORINFO_ACCESS_GET);
    // Widen to stack type
    lclTyp = genActualType(lclTyp);
#if BIGENDIAN
    op1 = gtNewIconNode(0, lclTyp);
#else
    op1 = gtNewIconNode(1, lclTyp);
#endif
    goto FIELD_DONE;
}
break;

default:
    assert(!"Unexpected fieldAccessor");
}

if (!isLoadAddress)
{
    if (prefixFlags & PREFIX_VOLATILE)
    {
        op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered

        if (!usesHelper)
        {
            assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ));
            op1->gtFlags |= GTF_IND_VOLATILE;
        }
    }

    if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
    {
        if (!usesHelper)
        {
            assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ));
            op1->gtFlags |= GTF_IND_UNALIGNED;
        }
    }
}

/* Check if the class needs explicit initialization */

if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
    GenTree* helperNode = impInitClass(&resolvedToken);
    if (compDonotInline())
    {
        return;
    }
    if (helperNode != nullptr)
    {
        op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
    }
}

FIELD_DONE:
    impPushOnStack(op1, tiRetVal);
}
break;

case CEE_STFLD:
case CEE_STSFLD:
{
    bool isStoreStatic = (opcode == CEE_STSFLD);

    CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)

    /* Get the CP_Fieldref index */

    assertImp(sz == sizeof(unsigned));

    _impResolveToken(CORINFO_TOKENKIND_Field);

    JITDUMP(" %08X", resolvedToken.token);

    int       aflags = CORINFO_ACCESS_SET;
    GenTree*  obj    = nullptr;
    typeInfo* tiObj  = nullptr;
    typeInfo  tiVal;

    /* Pull the value from the stack */
    StackEntry se = impPopStack();
    op2           = se.val;
    tiVal         = se.seTypeInfo;
    clsHnd        = tiVal.GetClassHandle();

    if (opcode == CEE_STFLD)
    {
        tiObj = &impStackTop().seTypeInfo;
        obj   = impPopStack().val;

        if (impIsThis(obj))
        {
            aflags |= CORINFO_ACCESS_THIS;
        }
    }

    eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);

    // Figure out the type of the member. We always call canAccessField, so you always need this
    // handle
    CorInfoType ciType = fieldInfo.fieldType;
    fieldClsHnd        = fieldInfo.structType;

    lclTyp = JITtype2varType(ciType);

    if (compIsForInlining())
    {
        /* Is this a 'special' (COM) field, a TLS ref static field, a field
           stored in the GC heap, or a per-inst static?
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
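/* Note the split that follows: primitive and GC-ref stores get an ASG node
   built immediately, while TYP_STRUCT stores are deferred (deferStructAssign)
   and built via impAssignStruct only after the interference spilling below. */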
bool deferStructAssign = (lclTyp == TYP_STRUCT);

if (!deferStructAssign)
{
    if (prefixFlags & PREFIX_VOLATILE)
    {
        assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
        op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
        op1->gtFlags |= GTF_IND_VOLATILE;
    }
    if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
    {
        assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
        op1->gtFlags |= GTF_IND_UNALIGNED;
    }

    /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
       trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
       importation and reads from the union as if it were a long during code generation. Though this
       can potentially read garbage, one can get lucky to have this working correctly.

       This code pattern is generated by the Dev10 MC++ compiler while storing to fields when compiled with
       the /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
       dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
       it works correctly always.

       Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
       for V4.0. */
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifndef TARGET_64BIT
    // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
    // generated for ARM as well as x86, so the following IR will be accepted:
    // STMTx (IL 0x... ???)
    //   *  ASG long
    //   +--*  CLS_VAR   long
    //   \--*  CNS_INT   int    2

    if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
        varTypeIsLong(op1->TypeGet()))
    {
        op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
    }
#endif

#ifdef TARGET_64BIT
    // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
    if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
    {
        op2->gtType = TYP_I_IMPL;
    }
    else
    {
        // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
        //
        if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
        {
            op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
        }

        // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
        //
        if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
        {
            op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
        }
    }
#endif

    // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
    // We insert a cast to the dest 'op1' type
    //
    if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
        varTypeIsFloating(op2->gtType))
    {
        op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
    }

    op1 = gtNewAssignNode(op1, op2);

    /* Mark the expression as containing an assignment */

    op1->gtFlags |= GTF_ASG;
}

/* Check if the class needs explicit initialization */

if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
    GenTree* helperNode = impInitClass(&resolvedToken);
    if (compDonotInline())
    {
        return;
    }
    if (helperNode != nullptr)
    {
        op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
    }
}

/* stfld can interfere with value classes (consider the sequence
   ldloc, ldloca, ..., stfld, stloc). We will be conservative and
   spill all value class references from the stack. */

if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
{
    assert(tiObj);

    // If we can resolve the field to be within some local,
    // then just spill that local.
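/* Illustrative interference (hypothetical struct local V0 of type S):

       ldloc.0          // evaluation stack now holds a tree that reads V0
       ldloca.s 0       // address of V0
       ...
       stfld    S::f    // writes through that address into V0

   the earlier stack entry is still an unevaluated tree reading V0, so the
   store must not be reordered ahead of it; spilling pins down its value. */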
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
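                // ECMA-335 requires the evaluation stack to be empty (apart from the size
                // operand, already popped above) when localloc executes; enforce that here.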
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
                // consumed both as `TYP_STRUCT` and `TYP_REF`.
                op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();

#if FEATURE_MULTIREG_RET
                op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv());
#endif
                op1->AsCall()->gtRetClsHnd = tokenType;

                tiRetVal = verMakeTypeInfo(tokenType);
                impPushOnStack(op1, tiRetVal);
            }
            break;

            case CEE_UNBOX:
            case CEE_UNBOX_ANY:
            {
                /* Get the Class index */
                assertImp(sz == sizeof(unsigned));

                _impResolveToken(CORINFO_TOKENKIND_Class);

                JITDUMP(" %08X", resolvedToken.token);

                bool runtimeLookup;
                op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
                if (op2 == nullptr)
                {
                    assert(compDonotInline());
                    return;
                }

                // Run this always so we can get access exceptions even with SkipVerification.
                accessAllowedResult =
                    info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
                impHandleAccessAllowed(accessAllowedResult, &calloutHelper);

                if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
                {
                    JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
                    op1 = impPopStack().val;
                    goto CASTCLASS;
                }

                /* Pop the object and create the unbox helper call */
                /* You might think that for UNBOX_ANY we need to push a different */
                /* (non-byref) type, but here we're making the tiRetVal that is used */
                /* for the intermediate pointer which we then transfer onto the OBJ */
                /* instruction.  OBJ then creates the appropriate tiRetVal. */

                op1 = impPopStack().val;
                assertImp(op1->gtType == TYP_REF);

                helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
                assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);

                // Check legality and profitability of inline expansion for unboxing.
                const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
                const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled();

                if (canExpandInline && shouldExpandInline)
                {
                    // See if we know anything about the type of op1, the object being unboxed.
                    bool                 isExact   = false;
                    bool                 isNonNull = false;
                    CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(op1, &isExact, &isNonNull);

                    // We can skip the "exact" bit here as we are comparing to a value class.
                    // compareTypesForEquality should bail on comparisons for shared value classes.
                    if (clsHnd != NO_CLASS_HANDLE)
                    {
                        const TypeCompareState compare =
                            info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd);

                        if (compare == TypeCompareState::Must)
                        {
                            JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n",
                                    opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd));

                            // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack.
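                            // The optimized UNBOX then reduces to, roughly (a sketch):
                            //     COMMA(NULLCHECK(obj), ADD(obj, TARGET_POINTER_SIZE))
                            // i.e. a byref just past the method table pointer of the box.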
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
                       );

                /*
                  ----------------------------------------------------------------------
                  | \ helper  |                         |                              |
                  |   \       |                         |                              |
                  |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
                  |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
                  | opcode  \ |                         |                              |
                  |---------------------------------------------------------------------
                  | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
                  |           |                         | push the BYREF to this local |
                  |---------------------------------------------------------------------
                  | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
                  |           | the BYREF               | For Linux when the           |
                  |           |                         | struct is returned in two    |
                  |           |                         | registers create a temp      |
                  |           |                         | whose address is passed to   |
                  |           |                         | the unbox_nullable helper.   |
                  |---------------------------------------------------------------------
                */

                if (opcode == CEE_UNBOX)
                {
                    if (helper == CORINFO_HELP_UNBOX_NULLABLE)
                    {
                        // Unbox nullable helper returns a struct type.
                        // We need to spill it to a temp so that we can take the address of it.
                        // Here we need unsafe value cls check, since the address of struct is taken to be used
                        // further along and potentially be exploitable.

                        unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
                        lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);

                        op2 = gtNewLclvNode(tmp, TYP_STRUCT);
                        op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
                        assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.

                        op2 = gtNewLclvNode(tmp, TYP_STRUCT);
                        op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
                        op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
                    }

                    assert(op1->gtType == TYP_BYREF);
                }
                else
                {
                    assert(opcode == CEE_UNBOX_ANY);

                    if (helper == CORINFO_HELP_UNBOX)
                    {
                        // Normal unbox helper returns a TYP_BYREF.
                        impPushOnStack(op1, tiRetVal);
                        oper = GT_OBJ;
                        goto OBJ;
                    }

                    assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");

#if FEATURE_MULTIREG_RET

                    if (varTypeIsStruct(op1) &&
                        IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed))
                    {
                        // Unbox nullable helper returns a TYP_STRUCT.
                        // For the multi-reg case we need to spill it to a temp so that
                        // we can pass the address to the unbox_nullable jit helper.

                        unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
                        lvaTable[tmp].lvIsMultiRegArg = true;
                        lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);

                        op2 = gtNewLclvNode(tmp, TYP_STRUCT);
                        op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
                        assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.

                        op2 = gtNewLclvNode(tmp, TYP_STRUCT);
                        op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
                        op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);

                        // In this case the return value of the unbox helper is TYP_BYREF.
                        // Make sure the right type is placed on the operand type stack.
                        impPushOnStack(op1, tiRetVal);

                        // Load the struct.
                        oper = GT_OBJ;

                        assert(op1->gtType == TYP_BYREF);

                        goto OBJ;
                    }
                    else

#endif // FEATURE_MULTIREG_RET

                    {
                        // If the struct is not returned in registers, we have it materialized in the RetBuf.
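                        // (The helper call is already typed TYP_STRUCT here, so it can be
                        // pushed directly once tiRetVal is computed below; no extra
                        // indirection through an address is required.)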
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
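                // Marking the block as run rarely (zero weight) helps later phases lay
                // out the throw path out of line, away from hot code.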
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if 
(impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) 
                {
                    compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR);
                    return;
                }

                BADCODE3("unknown opcode", ": %02X", (int)opcode);
        }

        codeAddr += sz;
        prevOpcode = opcode;

        prefixFlags = 0;
    }

    return;
#undef _impResolveToken
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

// Push a local/argument tree on the operand stack
void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
{
    tiRetVal.NormaliseForStack();

    if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
    {
        tiRetVal.SetUninitialisedObjRef();
    }

    impPushOnStack(op, tiRetVal);
}

//------------------------------------------------------------------------
// impCreateLocalNode: create a GT_LCL_VAR node to access a local that might need to be normalized on load
//
// Arguments:
//     lclNum -- The index into lvaTable
//     offset -- The offset to associate with the node
//
// Returns:
//     The node
//
GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset))
{
    var_types lclTyp;

    if (lvaTable[lclNum].lvNormalizeOnLoad())
    {
        lclTyp = lvaGetRealType(lclNum);
    }
    else
    {
        lclTyp = lvaGetActualType(lclNum);
    }

    return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset));
}

// Load a local/argument on the operand stack
// lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal)
{
    impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal);
}

// Load an argument on the operand stack
// Shared by the various CEE_LDARG opcodes
// ilArgNum is the argument index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
{
    Verify(ilArgNum < info.compILargsCount, "bad arg num");

    if (compIsForInlining())
    {
        if (ilArgNum >= info.compArgsCount)
        {
            compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
            return;
        }

        impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
                   impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
    }
    else
    {
        if (ilArgNum >= info.compArgsCount)
        {
            BADCODE("Bad IL");
        }

        unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param

        if (lclNum == info.compThisArg)
        {
            lclNum = lvaArg0Var;
        }

        impLoadVar(lclNum, offset);
    }
}

// Load a local on the operand stack
// Shared by the various CEE_LDLOC opcodes
// ilLclNum is the local index as specified in IL.
// It will be mapped to the correct lvaTable index
void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
{
    if (compIsForInlining())
    {
        if (ilLclNum >= info.compMethodInfo->locals.numArgs)
        {
            compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
            return;
        }

        // Get the local type
        var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;

        typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;

        /* Have we allocated a temp for this local? */

        unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));

        // All vars of inlined methods should be !lvNormalizeOnLoad()
        assert(!lvaTable[lclNum].lvNormalizeOnLoad());
        lclTyp = genActualType(lclTyp);

        impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
    }
    else
    {
        if (ilLclNum >= info.compMethodInfo->locals.numArgs)
        {
            BADCODE("Bad IL");
        }

        unsigned lclNum = info.compArgsCount + ilLclNum;

        impLoadVar(lclNum, offset);
    }
}

#ifdef TARGET_ARM
/**************************************************************************************
 *
 *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
 *  dst struct, because struct promotion will turn it into a float/double variable while
 *  the rhs will be an int/long variable. We don't code generate assignment of int into
 *  a float, but there is nothing that might prevent us from doing so. The tree however
 *  would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
 *
 *  tmpNum  - the lcl dst variable num that is a struct.
 *  src     - the src tree assigned to the dest that is a struct/int (when varargs call.)
 *  hClass  - the type handle for the struct variable.
 *
 *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
 *        however, we could do a codegen of transferring from int to float registers
 *        (transfer, not a cast.)
 *
 */
void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
{
    if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass))
    {
        int       hfaSlots = GetHfaCount(hClass);
        var_types hfaType  = GetHfaType(hClass);

        // If we have varargs we morph the method's return type to be "int" irrespective of its original
        // type: struct/float at importer because the ABI calls out return in integer registers.
        // We don't want struct promotion to replace an expression like this:
        //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
        // This means an int is getting assigned to a float without a cast. Prevent the promotion.
        if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
            (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
        {
            // Make sure this struct type stays as struct so we can receive the call in a struct.
            lvaTable[tmpNum].lvIsMultiRegRet = true;
        }
    }
}
#endif // TARGET_ARM

#if FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
//   registers return values to suitable temps.
//
// Arguments:
//     op -- call returning a struct in registers
//     hClass -- class handle for struct
//
// Returns:
//     Tree with reference to struct local to use as call return value.

GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree*             op,
                                              CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv))
{
    unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return"));
    impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
    GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);

    // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
    ret->gtFlags |= GTF_DONT_CSE;

    assert(IsMultiRegReturnedType(hClass, callConv));

    // Mark the var so that fields are not promoted and stay together.
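    // (If promotion split the struct into independent field locals, the fields
    // could no longer be populated as a unit from the return registers.)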
    lvaTable[tmpNum].lvIsMultiRegRet = true;

    return ret;
}
#endif // FEATURE_MULTIREG_RET

//------------------------------------------------------------------------
// impReturnInstruction: import a return or an explicit tail call
//
// Arguments:
//     prefixFlags -- active IL prefixes
//     opcode -- [in, out] IL opcode
//
// Returns:
//     True if import was successful (may fail for some inlinees)
//
bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
{
    const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0;

#ifdef DEBUG
    // If we are importing an inlinee and have GC ref locals we always
    // need to have a spill temp for the return value.  This temp
    // should have been set up in advance, over in fgFindBasicBlocks.
    if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
    {
        assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
    }
#endif // DEBUG

    GenTree*             op2       = nullptr;
    GenTree*             op1       = nullptr;
    CORINFO_CLASS_HANDLE retClsHnd = nullptr;

    if (info.compRetType != TYP_VOID)
    {
        StackEntry se = impPopStack();
        retClsHnd     = se.seTypeInfo.GetClassHandle();
        op2           = se.val;

        if (!compIsForInlining())
        {
            impBashVarAddrsToI(op2);
            op2 = impImplicitIorI4Cast(op2, info.compRetType);
            op2 = impImplicitR4orR8Cast(op2, info.compRetType);
            // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF.
            assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
                      ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) ||
                      (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) ||
                      (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
                      (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));

#ifdef DEBUG
            if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF))
            {
                // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
                // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
                // one-return BB.

                assert(op2->gtType == TYP_REF);

                // confirm that the argument is a GC pointer (for debugging (GC stress))
                GenTreeCall::Use* args = gtNewCallArgs(op2);
                op2                    = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);

                if (verbose)
                {
                    printf("\ncompGcChecks tree:\n");
                    gtDispTree(op2);
                }
            }
#endif
        }
        else
        {
            if (verCurrentState.esStackDepth != 0)
            {
                assert(compIsForInlining());
                JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty.");
                compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
                return false;
            }

#ifdef DEBUG
            if (verbose)
            {
                printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
                gtDispTree(op2);
            }
#endif

            // Make sure the type matches the original call.

            var_types returnType       = genActualType(op2->gtType);
            var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
            if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
            {
                originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
            }

            if (returnType != originalCallType)
            {
                // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa.
                // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice versa.
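                // (Widening a TYP_REF to TYP_I_IMPL merely stops GC-tracking the value;
                // the reverse would start reporting an arbitrary integer to the GC as a
                // live object pointer, which is unsafe.)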
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
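                    // (Every return site of the inlinee assigns to the single
                    // lvaInlineeReturnSpillTemp, so the asserts below check that this
                    // retExpr refers to that same local.)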
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
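        // (E.g. on SysV AMD64 a 16-byte struct returned in RAX/RDX still has
        // compRetNativeType == TYP_STRUCT, so the assert below is compiled out
        // on multi-reg targets.)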
        noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif

        op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);
        // return op2
        var_types returnType = info.compRetType;
        op1                  = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
    }
    else
    {
        // return op2
        op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
    }

    // We must have imported a tailcall and jumped to RET
    if (isTailCall)
    {
        assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));

        opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES

        // impImportCall() would have already appended TYP_VOID calls
        if (info.compRetType == TYP_VOID)
        {
            return true;
        }
    }

    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
    // Remember at which BC offset the tree was finished
    impNoteLastILoffs();
#endif
    return true;
}

/*****************************************************************************
 *  Mark the block as unimported.
 *  Note that the caller is responsible for calling impImportBlockPending(),
 *  with the appropriate stack-state
 */

inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose && (block->bbFlags & BBF_IMPORTED))
    {
        printf("\n" FMT_BB " will be reimported\n", block->bbNum);
    }
#endif

    block->bbFlags &= ~BBF_IMPORTED;
}

/*****************************************************************************
 *  Mark the successors of the given block as unimported.
 *  Note that the caller is responsible for calling impImportBlockPending()
 *  for all the successors, with the appropriate stack-state.
 */

void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
    for (BasicBlock* const succBlock : block->Succs())
    {
        impReimportMarkBlock(succBlock);
    }
}

/*****************************************************************************
 *
 *  Filter wrapper that handles only the verification exception code;
 *  all other exceptions continue the handler search.
 */

LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
    if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
    {
        return EXCEPTION_EXECUTE_HANDLER;
    }

    return EXCEPTION_CONTINUE_SEARCH;
}

void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
    assert(block->hasTryIndex());
    assert(!compIsForInlining());

    unsigned  tryIndex = block->getTryIndex();
    EHblkDsc* HBtab    = ehGetDsc(tryIndex);

    if (isTryStart)
    {
        assert(block->bbFlags & BBF_TRY_BEG);

        // The stack must be empty
        //
        if (block->bbStkDepth != 0)
        {
            BADCODE("Evaluation stack must be empty on entry into a try block");
        }
    }

    // Save the stack contents, we'll need to restore it later
    //
    SavedStack blockState;
    impSaveStackState(&blockState, false);

    while (HBtab != nullptr)
    {
        if (isTryStart)
        {
            // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
            // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
            //
            if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
            {
                // We trigger an invalid program exception here unless we have a try/fault region.
                //
                if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
                {
                    BADCODE(
                        "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
                }
                else
                {
                    // Allow a try/fault region to proceed.
                    assert(HBtab->HasFaultHandler());
                }
            }
        }

        // Recursively process the handler block, if we haven't already done so.
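        // (BBF_IMPORTED and the pending-block membership check below together
        // ensure each handler is queued for importation at most once.)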
        BasicBlock* hndBegBB = HBtab->ebdHndBeg;
        if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0))
        {
            //  Construct the proper verification stack state
            //  either empty or one that contains just
            //  the Exception Object that we are dealing with
            //
            verCurrentState.esStackDepth = 0;

            if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
            {
                CORINFO_CLASS_HANDLE clsHnd;

                if (HBtab->HasFilter())
                {
                    clsHnd = impGetObjectClass();
                }
                else
                {
                    CORINFO_RESOLVED_TOKEN resolvedToken;

                    resolvedToken.tokenContext = impTokenLookupContextHandle;
                    resolvedToken.tokenScope   = info.compScopeHnd;
                    resolvedToken.token        = HBtab->ebdTyp;
                    resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
                    info.compCompHnd->resolveToken(&resolvedToken);

                    clsHnd = resolvedToken.hClass;
                }

                // push catch arg onto the stack, spill to a temp if necessary
                // Note: can update HBtab->ebdHndBeg!
                hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
            }

            // Queue up the handler for importing
            //
            impImportBlockPending(hndBegBB);
        }

        // Process the filter block, if we haven't already done so.
        if (HBtab->HasFilter())
        {
            /* @VERIFICATION : Ideally the end of filter state should get
               propagated to the catch handler, this is an incompleteness,
               but is not a security/compliance issue, since the only
               interesting state is the 'thisInit' state.
            */

            BasicBlock* filterBB = HBtab->ebdFilter;

            if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0))
            {
                verCurrentState.esStackDepth = 0;

                // push catch arg onto the stack, spill to a temp if necessary
                // Note: can update HBtab->ebdFilter!
                const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
                filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);

                impImportBlockPending(filterBB);
            }
        }

        // This seems redundant ....??
        if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
        {
            /* Recursively process the handler block */

            verCurrentState.esStackDepth = 0;

            // Queue up the fault handler for importing
            //
            impImportBlockPending(HBtab->ebdHndBeg);
        }

        // Now process our enclosing try index (if any)
        //
        tryIndex = HBtab->ebdEnclosingTryIndex;
        if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
        {
            HBtab = nullptr;
        }
        else
        {
            HBtab = ehGetDsc(tryIndex);
        }
    }

    // Restore the stack contents
    impRestoreStackState(&blockState);
}

//***************************************************************
// Import the instructions for the given basic block.  Perform
// verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
// time, or whose verification pre-state is changed.

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void Compiler::impImportBlock(BasicBlock* block)
{
    // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
    // handle them specially. In particular, there is no IL to import for them, but we do need
    // to mark them as imported and put their successors on the pending import list.
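    // (Such internal blocks are introduced, e.g., when EH normalization splits
    // shared try/handler entries; they have no IL range of their own.)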
    if (block->bbFlags & BBF_INTERNAL)
    {
        JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
        block->bbFlags |= BBF_IMPORTED;

        for (BasicBlock* const succBlock : block->Succs())
        {
            impImportBlockPending(succBlock);
        }

        return;
    }

    bool markImport;

    assert(block);

    /* Make the block globally available */

    compCurBB = block;

#ifdef DEBUG
    /* Initialize the debug variables */
    impCurOpcName = "unknown";
    impCurOpcOffs = block->bbCodeOffs;
#endif

    /* Set the current stack state to the merged result */
    verResetCurrentState(block, &verCurrentState);

    /* Now walk the code and import the IL into GenTrees */

    struct FilterVerificationExceptionsParam
    {
        Compiler*   pThis;
        BasicBlock* block;
    };
    FilterVerificationExceptionsParam param;

    param.pThis = this;
    param.block = block;

    PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
    {
        /* @VERIFICATION : For now, the only state propagation from try
           to its handler is "thisInit" state (stack is empty at start of try).
           In general, for state that we track in verification, we need to
           model the possibility that an exception might happen at any IL
           instruction, so we really need to merge all states that obtain
           between IL instructions in a try block into the start states of
           all handlers.

           However we do not allow the 'this' pointer to be uninitialized when
           entering most kinds of try regions (only try/fault are allowed to
           have an uninitialized this pointer on entry to the try)

           Fortunately, the stack is thrown away when an exception
           leads to a handler, so we don't have to worry about that.
           We DO, however, have to worry about the "thisInit" state.
           But only for the try/fault case.

           The only allowed transition is from TIS_Uninit to TIS_Init.

           So for a try/fault region for the fault handler block
           we will merge the start state of the try begin
           and the post-state of each block that is part of this try region
        */

        // merge the start state of the try begin
        //
        if (pParam->block->bbFlags & BBF_TRY_BEG)
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, true);
        }

        pParam->pThis->impImportBlockCode(pParam->block);

        // As discussed above:
        // merge the post-state of each block that is part of this try region
        //
        if (pParam->block->hasTryIndex())
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, false);
        }
    }
    PAL_EXCEPT_FILTER(FilterVerificationExceptions)
    {
        verHandleVerificationFailure(block DEBUGARG(false));
    }
    PAL_ENDTRY

    if (compDonotInline())
    {
        return;
    }

    assert(!compDonotInline());

    markImport = false;

SPILLSTACK:

    unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
    bool        reimportSpillClique = false;
    BasicBlock* tgtBlock            = nullptr;

    /* If the stack is non-empty, we might have to spill its contents */

    if (verCurrentState.esStackDepth != 0)
    {
        impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
                                  // on the stack, its lifetime is hard to determine, simply
                                  // don't reuse such temps.

        Statement* addStmt = nullptr;

        /* Do the successors of 'block' have any other predecessors ?
           We do not want to do some of the optimizations related to multiRef
           if we can reimport blocks */

        unsigned multRef = impCanReimport ? unsigned(~0) : 0;

        switch (block->bbJumpKind)
        {
            case BBJ_COND:

                addStmt = impExtractLastStmt();

                assert(addStmt->GetRootNode()->gtOper == GT_JTRUE);

                /* Note if the next block has more than one ancestor */

                multRef |= block->bbNext->bbRefs;

                /* Does the next block have temps assigned?
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has been imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code.
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had an int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors.
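// For illustration only (not taken from any real method), IL like the following
// would reach a join point with inconsistent stack depths and trip the check
// below: the branch path enters JOIN with depth 0, the fall-through path with
// depth 1.
//
//           ldc.i4.0
//           brfalse JOIN   // stack is empty when the branch is taken
//           ldc.i4.1       // fall-through pushes one item
//     JOIN: pop
//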
if (block->bbStkDepth != verCurrentState.esStackDepth) { #ifdef DEBUG char buffer[400]; sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, verCurrentState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else NO_WAY("Block entered with different stack depths"); #endif } if (!addToPending) { return; } if (block->bbStkDepth > 0) { // We need to fix the types of any spill temps that might have changed: // int->native int, float->double, int->byref, etc. impRetypeEntryStateTemps(block); } // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_Unknown) PendingDsc; } dsc->pdBB = block; dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; dsc->pdThisPtrInit = verCurrentState.thisInitialized; // Save the stack trees for later if (verCurrentState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us now to consider the block as not imported (at least for // the final time...) block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block. void Compiler::impReimportBlockPending(BasicBlock* block) { JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum); assert(block->bbFlags & BBF_IMPORTED); // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_ImpStack) PendingDsc; } dsc->pdBB = block; if (block->bbEntryState) { dsc->pdThisPtrInit = block->bbEntryState->thisInitialized; dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth; dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack; } else { dsc->pdThisPtrInit = TIS_Bottom; dsc->pdSavedStack.ssDepth = 0; dsc->pdSavedStack.ssTrees = nullptr; } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us now to consider the block as not imported (at least for // the final time...)
block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp) { if (comp->impBlockListNodeFreeList == nullptr) { return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1); } else { BlockListNode* res = comp->impBlockListNodeFreeList; comp->impBlockListNodeFreeList = res->m_next; return res; } } void Compiler::FreeBlockListNode(Compiler::BlockListNode* node) { node->m_next = impBlockListNodeFreeList; impBlockListNodeFreeList = node; } void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback) { bool toDo = true; noway_assert(!fgComputePredsDone); if (!fgCheapPredsValid) { fgComputeCheapPreds(); } BlockListNode* succCliqueToDo = nullptr; BlockListNode* predCliqueToDo = new (this) BlockListNode(block); while (toDo) { toDo = false; // Look at the successors of every member of the predecessor to-do list. while (predCliqueToDo != nullptr) { BlockListNode* node = predCliqueToDo; predCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlock* const succ : blk->Succs()) { // If it's not already in the clique, add it, and also add it // as a member of the successor "toDo" set. if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0) { callback->Visit(SpillCliqueSucc, succ); impSpillCliqueSetMember(SpillCliqueSucc, succ, 1); succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo); toDo = true; } } } // Look at the predecessors of every member of the successor to-do list. while (succCliqueToDo != nullptr) { BlockListNode* node = succCliqueToDo; succCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next) { BasicBlock* predBlock = pred->block; // If it's not already in the clique, add it, and also add it // as a member of the predecessor "toDo" set. if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0) { callback->Visit(SpillCliquePred, predBlock); impSpillCliqueSetMember(SpillCliquePred, predBlock, 1); predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo); toDo = true; } } } } // If this fails, it means we didn't walk the spill clique properly and somehow managed to // miss walking back to include the predecessor we started from. // The most likely cause: missing or out-of-date bbPreds assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0); } void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliqueSucc) { assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor. blk->bbStkTempsIn = m_baseTmp; } else { assert(predOrSucc == SpillCliquePred); assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor. blk->bbStkTempsOut = m_baseTmp; } } void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { // For Preds we could be a little smarter and just find the existing store // and re-type it/add a cast, but that is complicated and hopefully very rare, so // just re-import the whole block (just like we do for successors) if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0)) { // If we haven't imported this block and we're not going to (because it isn't on // the pending list) then just ignore it for now.
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts; since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique.
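// (Illustrative note: a "spill clique" is the set of blocks connected by edges
// that carry live stack values. If B1 and B2 both jump to B3 with one item on
// the stack, then B1 and B2 (as predecessors) and B3 (as a successor) belong to
// one clique and must agree on the type of the shared spill temp.)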
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler; steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects.
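// (For the OSR case, the redirect is a BBJ_ALWAYS block whose bbJumpDest is the
// OSR entry point; the loop below follows bbJumpDest for it, just as it follows
// bbNext for BBJ_NONE blocks.)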
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType(); if (corType == CORINFO_TYPE_CLASS) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); } else if (corType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT); } else if (corType == CORINFO_TYPE_BYREF) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); corType = info.compCompHnd->getChildType(sigClass, &sigClass); } if (argNode != nullptr) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull); if (argCls != nullptr) { const bool isArgValueType = eeIsValueClass(argCls); // Exact class of the arg is known if (isExact && !isArgValueType) { inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS); if ((argCls != sigClass) && (sigClass != nullptr)) { // .. but the signature accepts a less concrete type. inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT); } } // Arg is a reference type in the signature and a boxed value type was passed. else if (isArgValueType && (corType == CORINFO_TYPE_CLASS)) { inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED); } } if (argNode->OperIsConst()) { inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST); } argUse = argUse->GetNext(); } sigArg = info.compCompHnd->getArgNext(sigArg); } // Note if the callee's return type is a value type if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT); } // Note if the callee's class is a promotable struct if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0) { assert(structPromotionHelper != nullptr); if (structPromotionHelper->CanPromoteStructType(info.compClassHnd)) { inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE); } inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE); } #ifdef FEATURE_SIMD // Note if this method has SIMD args or return value if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn) { inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD); } #endif // FEATURE_SIMD // Roughly classify callsite frequency. InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED; // If this is a prejit root, or a maximally hot block... if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight())) { frequency = InlineCallsiteFrequency::HOT; } // No training data. Look for loop-like things. // We consider a recursive call loop-like. Do not give the inlining boost to the method itself. // However, give it to things nearby. else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) && (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle)) { frequency = InlineCallsiteFrequency::LOOP; } else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT)) { frequency = InlineCallsiteFrequency::WARM; } // Now modify the multiplier based on where we're called from. else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { frequency = InlineCallsiteFrequency::RARE; } else { frequency = InlineCallsiteFrequency::BORING; } // Also capture the block weight of the call site. // // In the prejit root case, assume at runtime there might be a hot call site // for this method, so we won't prematurely conclude this method should never // be inlined.
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes a STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic.
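// (MAX_INL_ARGS, like MAX_INL_LCLS above, bounds the fixed-size inlArgInfo and
// lclVarInfo arrays that impInlineInitVars fills in below.)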
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs); if (methInfo->args.numArgs > MAX_INL_ARGS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS); return; } // Note force inline state inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline); // Note IL code size inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); if (inlineResult->IsFailure()) { return; } // Make sure maxstack is not too big inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack); if (inlineResult->IsFailure()) { return; } } /***************************************************************************** */ void Compiler::impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult) { // Either EE or JIT might throw exceptions below. // If that happens, just don't inline the method. struct Param { Compiler* pThis; GenTreeCall* call; CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; CORINFO_CONTEXT_HANDLE exactContextHnd; InlineResult* result; InlineCandidateInfo** ppInlineCandidateInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.methAttr = methAttr; param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle); param.result = inlineResult; param.ppInlineCandidateInfo = ppInlineCandidateInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { CorInfoInitClassResult initClassResult; #ifdef DEBUG const char* methodName; const char* className; methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className); if (JitConfig.JitNoInline()) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); goto _exit; } #endif /* Try to get the code address/size for the method */ CORINFO_METHOD_INFO methInfo; if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo)) { pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); goto _exit; } // Profile data allows us to avoid early "too many IL bytes" outs. pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, pParam->pThis->fgHaveSufficientProfileData()); bool forceInline; forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE); pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result); if (pParam->result->IsFailure()) { assert(pParam->result->IsNever()); goto _exit; } // Speculatively check if initClass() can be done. // If it can be done, we will try to inline the method. initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */, pParam->exactContextHnd /* context */); if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) { pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); goto _exit; } // Give the EE the final say in whether to inline or not.
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow assigning a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee.
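//
// For example (illustrative only): for an actual argument of the form
// '&localStruct.field', impIsAddressInLocal finds the underlying struct local
// and argIsByRefToStructLocal is set; a constant null 'this' argument instead
// aborts the inline with CALLSITE_ARG_HAS_NULL_THIS.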
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is // usually the tree node present at the call, but may also differ in // various ways: // - when the call arg is a GT_RET_EXPR, we search back through the ret // expr chain for the actual node. Note this will either be the original // call (which will be a failed inline by this point), or the return // expression from some set of inlines. // - when argument type casting is needed the necessary casts are added // around the argument node. // - if an argument can be simplified by folding then the node here is the // folded value. // // The method may make observations that lead to marking this candidate as // a failed inline. If this happens the initialization is abandoned immediately // to try and reduce the jit time cost for a failed inline. void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) { assert(!compIsForInlining()); GenTreeCall* call = pInlineInfo->iciCall; CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo; unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr; InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo; InlineResult* inlineResult = pInlineInfo->inlineResult; // Inlined methods always use the managed calling convention const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed); /* init the argument struct */ memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0])); GenTreeCall::Use* thisArg = call->gtCallThisArg; unsigned argCnt = 0; // Count of the arguments assert((methInfo->args.hasThis()) == (thisArg != nullptr)); if (thisArg != nullptr) { inlArgInfo[0].argIsThis = true; impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Record some information about each of the arguments */ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0; #if USER_ARGS_COME_LAST unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0; #else // USER_ARGS_COME_LAST unsigned typeCtxtArg = methInfo->args.totalILArgs(); #endif // USER_ARGS_COME_LAST for (GenTreeCall::Use& use : call->Args()) { if (hasRetBuffArg && (&use == call->gtCallArgs)) { continue; } // Ignore the type context argument if (hasTypeCtxtArg && (argCnt == typeCtxtArg)) { pInlineInfo->typeContextArg = typeCtxtArg; typeCtxtArg = 0xFFFFFFFF; continue; } GenTree* actualArg = gtFoldExpr(use.GetNode()); impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Make sure we got the arg number right */ assert(argCnt == methInfo->args.totalILArgs()); #ifdef FEATURE_SIMD bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn; #endif // FEATURE_SIMD /* We have typeless opcodes, get type information from the signature */ if (thisArg != nullptr) { lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle); lclVarInfo[0].lclHasLdlocaOp = false; #ifdef FEATURE_SIMD // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase // the inlining multiplier) for anything in that assembly. // But we only need to normalize it if it is a TYP_STRUCT // (which we need to do even if we have already set foundSIMDType).
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo))) { foundSIMDType = true; } #endif // FEATURE_SIMD var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF; lclVarInfo[0].lclTypeInfo = sigType; GenTree* thisArgNode = thisArg->GetNode(); assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesn't care (clsAttr & CORINFO_FLG_VALUECLASS))); if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType)) { if (sigType == TYP_REF) { /* The argument cannot be bashed into a ref (see bug 750871) */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF); return; } /* This can only happen with byrefs <-> ints/shorts */ assert(sigType == TYP_BYREF); assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF)); lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } } /* Init the types of the arguments and make sure the types * from the trees match the types in the signature */ CORINFO_ARG_LIST_HANDLE argLst; argLst = methInfo->args.args; unsigned i; for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst)) { var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args); lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst); #ifdef FEATURE_SIMD if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo))) { // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've // found a SIMD type, even if this may not be a type we recognize (the assumption is that // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier). foundSIMDType = true; if (sigType == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle()); sigType = structType; } } #endif // FEATURE_SIMD lclVarInfo[i].lclTypeInfo = sigType; lclVarInfo[i].lclHasLdlocaOp = false; /* Does the tree type match the signature type? */ GenTree* inlArgNode = inlArgInfo[i].argNode; if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType)); /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. Intentionally reject cases with mismatches so the jit is more flexible when encountering bad IL. */ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) || (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) || (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType)); if (!isPlausibleTypeMatch) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE); return; } GenTree** pInlArgNode; if (inlArgNode->OperIs(GT_PUTARG_TYPE)) { // There was a widening or narrowing cast. GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); pInlArgNode = &putArgType->gtOp1; inlArgNode = putArgType->gtOp1; } else { // Same size but different type of argument. pInlArgNode = &inlArgInfo[i].argNode; } /* Is it a narrowing or widening cast?
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
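//
//    Illustrative example (an assumption for exposition, not from the original
//    source): when inlining a C# method such as
//
//        int GetX() { return this.x; }
//
//    the ldfld through 'this' is the first side-effecting operation in the
//    inlinee, so the jit can rely on it to fault on a null 'this' and omit
//    the separate null check it would otherwise have to insert.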
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
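//
//    For example (illustrative, not from the original source), a callee
//    declared in C# as
//
//        [MethodImpl(MethodImplOptions.Synchronized)]
//        static int Get() => 42;
//
//    carries CORINFO_FLG_SYNCH and is rejected below with
//    InlineObservation::CALLEE_IS_SYNCHRONIZED; that observation is inherent
//    to the callee, so the method can be flagged to skip future attempts.
//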
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
    impInlineRoot()->m_inlineStrategy->NoteCandidate();

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}

/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions

bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName)
{
#if defined(TARGET_XARCH)
    switch (intrinsicName)
    {
        // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
        // instructions to directly compute round/ceiling/floor/truncate.

        case NI_System_Math_Abs:
        case NI_System_Math_Sqrt:
            return true;

        case NI_System_Math_Ceiling:
        case NI_System_Math_Floor:
        case NI_System_Math_Truncate:
        case NI_System_Math_Round:
            return compOpportunisticallyDependsOn(InstructionSet_SSE41);

        case NI_System_Math_FusedMultiplyAdd:
            return compOpportunisticallyDependsOn(InstructionSet_FMA);

        default:
            return false;
    }
#elif defined(TARGET_ARM64)
    switch (intrinsicName)
    {
        case NI_System_Math_Abs:
        case NI_System_Math_Ceiling:
        case NI_System_Math_Floor:
        case NI_System_Math_Truncate:
        case NI_System_Math_Round:
        case NI_System_Math_Sqrt:
        case NI_System_Math_Max:
        case NI_System_Math_Min:
            return true;

        case NI_System_Math_FusedMultiplyAdd:
            return compOpportunisticallyDependsOn(InstructionSet_AdvSimd);

        default:
            return false;
    }
#elif defined(TARGET_ARM)
    switch (intrinsicName)
    {
        case NI_System_Math_Abs:
        case NI_System_Math_Round:
        case NI_System_Math_Sqrt:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of logic is not implemented for other architectures.
    // The reason for returning true is that on all other architectures the only
    // intrinsics enabled are target intrinsics.
    return true;
#endif
}

/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.

bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
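    //
    // For example (illustrative): on x64, NI_System_Math_Sqrt is expanded to a
    // sqrt instruction by IsTargetIntrinsic, so this method returns false for
    // it, whereas NI_System_Math_Sin has no direct encoding on that target and
    // is implemented as a call to the System.Math managed method instead.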
    return !IsTargetIntrinsic(intrinsicName);
}

bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName)
{
    switch (intrinsicName)
    {
        case NI_System_Math_Abs:
        case NI_System_Math_Acos:
        case NI_System_Math_Acosh:
        case NI_System_Math_Asin:
        case NI_System_Math_Asinh:
        case NI_System_Math_Atan:
        case NI_System_Math_Atanh:
        case NI_System_Math_Atan2:
        case NI_System_Math_Cbrt:
        case NI_System_Math_Ceiling:
        case NI_System_Math_Cos:
        case NI_System_Math_Cosh:
        case NI_System_Math_Exp:
        case NI_System_Math_Floor:
        case NI_System_Math_FMod:
        case NI_System_Math_FusedMultiplyAdd:
        case NI_System_Math_ILogB:
        case NI_System_Math_Log:
        case NI_System_Math_Log2:
        case NI_System_Math_Log10:
        case NI_System_Math_Max:
        case NI_System_Math_Min:
        case NI_System_Math_Pow:
        case NI_System_Math_Round:
        case NI_System_Math_Sin:
        case NI_System_Math_Sinh:
        case NI_System_Math_Sqrt:
        case NI_System_Math_Tan:
        case NI_System_Math_Tanh:
        case NI_System_Math_Truncate:
        {
            assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END));
            return true;
        }

        default:
        {
            assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END));
            return false;
        }
    }
}

bool Compiler::IsMathIntrinsic(GenTree* tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName);
}

//------------------------------------------------------------------------
// impDevirtualizeCall: Attempt to change a virtual vtable call into a
//   normal call
//
// Arguments:
//     call -- the call node to examine/modify
//     pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R.
//     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
//     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
//     pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
//     pExactContextHandle -- [OUT] updated context handle iff call devirtualized
//     isLateDevirtualization -- if devirtualization is happening after importation
//     isExplicitTailCall -- [IN] true if we plan on using an explicit tail call
//     ilOffset -- IL offset of the call
//
// Notes:
//     Virtual calls in IL will always "invoke" the base class method.
//
//     This transformation looks for evidence that the type of 'this'
//     in the call is exactly known, is a final class or would invoke
//     a final method, and if that and other safety checks pan out,
//     modifies the call and the call info to create a direct call.
//
//     This transformation is initially done in the importer and not
//     in some subsequent optimization pass because we want it to be
//     upstream of inline candidate identification.
//
//     However, later phases may supply improved type information that
//     can enable further devirtualization. We currently reinvoke this
//     code after inlining, if the return value of the inlined call is
//     the 'this obj' of a subsequent virtual call.
//
//     If devirtualization succeeds and the call's this object is a
//     (boxed) value type, the jit will ask the EE for the unboxed entry
//     point. If this exists, the jit will invoke the unboxed entry
//     on the box payload. In addition, if the boxing operation is
//     visible to the jit and the call is the only consumer of the box,
//     the jit will try to analyze the box to see if the call can instead
//     be made on a local copy. If that is doable, the call is
//     updated to invoke the unboxed entry on the local copy and the
//     boxing operation is removed.
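//
//     A minimal sketch of the transformation (illustrative C#, not from the
//     original source):
//
//         sealed class Impl : Base { public override int M() => 1; }
//         ...
//         Base b = GetImpl();
//         b.M();   // CALLVIRT in IL
//
//     If the jit can prove 'b' is exactly Impl (or, as here, that Impl is
//     final), the virtual call is rewritten as a direct call to Impl.M,
//     which then becomes eligible for inlining.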
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
    if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
    {
        assert(!"mismatched method attributes");
    }
#endif // DEBUG
    }

    // In R2R mode, we might see virtual stub calls to
    // non-virtuals. For instance, cases where the non-virtual method
    // is in a different assembly but is called via CALLVIRT. For
    // version resilience we must allow for the fact that the method
    // might become virtual in some update.
    //
    // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
    // regular call+nullcheck upstream, so we won't reach this
    // point.
    if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
    {
        assert(call->IsVirtualStub());
        assert(opts.IsReadyToRun());
        JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
        return;
    }

    // Fetch information about the class that introduced the virtual method.
    CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
    const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);

    // Is the call an interface call?
    const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;

    // See what we know about the type of 'this' in the call.
    GenTree*             thisObj      = call->gtCallThisArg->GetNode()->gtEffectiveVal(false);
    bool                 isExact      = false;
    bool                 objIsNonNull = false;
    CORINFO_CLASS_HANDLE objClass     = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);

    // Bail if we know nothing.
    if (objClass == NO_CLASS_HANDLE)
    {
        JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));

        // Don't try guarded devirtualization when we're doing late devirtualization.
        //
        if (isLateDevirtualization)
        {
            JITDUMP("No guarded devirt during late devirtualization\n");
            return;
        }

        considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass,
                                        pContextHandle DEBUGARG(objClass) DEBUGARG("unknown"));
        return;
    }

    // If the objClass is sealed (final), then we may be able to devirtualize.
    const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
    const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;

#if defined(DEBUG)
    const char* callKind       = isInterface ? "interface" : "virtual";
    const char* objClassNote   = "[?]";
    const char* objClassName   = "?objClass";
    const char* baseClassName  = "?baseClass";
    const char* baseMethodName = "?baseMethod";

    if (verbose || doPrint)
    {
        objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
        objClassName   = info.compCompHnd->getClassName(objClass);
        baseClassName  = info.compCompHnd->getClassName(baseClass);
        baseMethodName = eeGetMethodName(baseMethod, nullptr);

        if (verbose)
        {
            printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
                   "    class for 'this' is %s%s (attrib %08x)\n"
                   "    base method is %s::%s\n",
                   callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
        }
    }
#endif // defined(DEBUG)

    // See if the jit's best type for `obj` is an interface.
    // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
    //   IL_021d:  ldloc.0
    //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
    //
    // If so, we can't devirtualize, but we may be able to do guarded devirtualization.
    //
    if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
    {
        // Don't try guarded devirtualization when we're doing late devirtualization.
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
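            // (Illustrative example, not from the original source: for a call
            // to Comparer<int>.Default, the single class generic argument is
            // System.Int32; since it is a final type, the VM can report the
            // exact comparer class it will hand back, letting later phases
            // devirtualize calls made on the returned instance.)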
            CORINFO_SIG_INFO sig;
            info.compCompHnd->getMethodSig(methodHnd, &sig);
            assert(sig.sigInst.classInstCount == 1);
            CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
            assert(typeHnd != nullptr);

            // Lookup can be incorrect when we have __Canon as it won't appear
            // to implement any interface types.
            //
            // And if we do not have a final type, devirt & inlining is
            // unlikely to result in much simplification.
            //
            // We can use CORINFO_FLG_FINAL to screen out both of these cases.
            const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
            const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);

            if (isFinalType)
            {
                if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default)
                {
                    result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
                }
                else
                {
                    assert(ni == NI_System_Collections_Generic_Comparer_get_Default);
                    result = info.compCompHnd->getDefaultComparerClass(typeHnd);
                }
                JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
                        result != nullptr ? eeGetClassName(result) : "unknown");
            }
            else
            {
                JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n",
                        eeGetClassName(typeHnd));
            }

            break;
        }

        default:
        {
            JITDUMP("This special intrinsic not handled, sorry...\n");
            break;
        }
    }

    return result;
}

//------------------------------------------------------------------------
// impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
//
// Arguments:
//    token - init value for the allocated token.
//
// Return Value:
//    pointer to the token in jit-allocated memory.
//
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token)
{
    CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
    *memory                        = token;
    return memory;
}

//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables.
//
class SpillRetExprHelper
{
public:
    SpillRetExprHelper(Compiler* comp) : comp(comp)
    {
    }

    void StoreRetExprResultsInArgs(GenTreeCall* call)
    {
        for (GenTreeCall::Use& use : call->Args())
        {
            comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this);
        }
        if (call->gtCallThisArg != nullptr)
        {
            comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this);
        }
    }

private:
    static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
    {
        assert((pTree != nullptr) && (*pTree != nullptr));
        GenTree* tree = *pTree;
        if ((tree->gtFlags & GTF_CALL) == 0)
        {
            // Trees with ret_expr are marked as GTF_CALL.
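            // (GTF_CALL propagates upward from any call -- and a GT_RET_EXPR
            // stands in for a call result -- so a subtree without the flag
            // cannot contain a GT_RET_EXPR and can safely be skipped.)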
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned.
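// (After spilling, both copies of the call simply reload the spilled values from the temps.)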
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, i.e. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try to get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid.
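// Bail out below if the handle does not actually describe an array type.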
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals the value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
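// The type info pushed alongside the tree must carry the struct's class handle.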
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // Helper function that tells us whether the IL instruction at the given address // consumes an address at the top of the stack. We use it to avoid setting // lvAddrTaken unnecessarily. bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out because if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitive-like struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isn't a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and it is easy to factor // out if we need to do so.
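// Resolve the field token so we can inspect the field's type below.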
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peek at the n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * all have to be cloneable/spilled values.
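* Only constants and local variable nodes qualify; see impValidSpilledStackEntry above.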
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesn't mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel).
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references to that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignments to (unaliased) locals don't count as side effects, as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too.
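// E.g. for 'unaliasedLcl = expr', only the side effects of 'expr' need to be considered below.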
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
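// Normalization below wraps the value in an OBJ carrying the signature's struct handle when needed.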
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD)) { forceNormalization = true; } } #ifdef DEBUG if (verbose) { printf("Calling impNormStructVal on:\n"); gtDispTree(temp); } #endif temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization); #ifdef DEBUG if (verbose) { printf("resulting tree:\n"); gtDispTree(temp); } #endif } /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */ argList = gtPrependNewCallArg(temp, argList); } if (sig != nullptr) { if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this method, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); CORINFO_CLASS_HANDLE classHnd; CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types jitSigType = JITtype2varType(corType); if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet())) { BADCODE("the call argument has a type that can't be implicitly converted to the signature type"); } // insert implied casts (from float to double or double to float) if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); } else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE)) { arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT)); } // insert any widening or narrowing casts for backwards compatibility arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && corType != CORINFO_TYPE_VAR) { CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); if (argRealClass != nullptr) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this method, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); } } const var_types nodeArgType = arg->GetNode()->TypeGet(); if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { assert(!varTypeIsStruct(nodeArgType)); // Some ABIs require precise size information for call arguments less than target pointer size, // for example arm64 OSX. Create a special node to keep this information until morph // consumes it into `fgArgInfo`.
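// The GT_PUTARG_TYPE node is a transparent wrapper; it only records the signature type for morph.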
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
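// Walk to the end of the (already reversed) list and append the return buffer there.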
GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else { for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } #else // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); #endif } } else #endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } // now returns void, not a struct src->gtType = TYP_VOID; // return the morphed call node return src; } else { // Case of call returning a struct in one or more registers. var_types returnType = (var_types)srcCall->gtReturnType; // First we try to change this to "LclVar/LclFld = call" // if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)) { // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD. // That is, the IR will be of the form lclVar = call for multi-reg return // GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar(); unsigned lclNum = lcl->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (src->AsCall()->HasMultiRegRetVal()) { // Mark the struct LclVar as used in a MultiReg return context // which currently makes it non promotable. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; } dest = lcl; #if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updated to include ARM. impMarkLclDstNotPromotable(lclNum, src, structHnd); lcl->gtFlags |= GTF_DONT_CSE; #elif defined(UNIX_AMD64_ABI) // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs. assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs."); // Make the struct non promotable. The eightbytes could contain multiple fields. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. // TODO-Cleanup: Why is this needed here? It seems that it will set this even for // non-multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; #endif } else // we don't have a GT_ADDR of a GT_LCL_VAR { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. asgType = returnType; destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->gtOper == GT_RET_EXPR) { GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall(); noway_assert(call->gtOper == GT_CALL); if (call->HasRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs); // now returns void, not a struct src->gtType = TYP_VOID; call->gtType = TYP_VOID; // We already have appended the write to 'dest' GT_CALL's args // So now we just return an empty node (pruning the GT_RET_EXPR) return src; } else { // Case of inline method returning a struct in one or more registers. // We won't need a return buffer asgType = src->gtType; if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR)) { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier.
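// GTF_IND_TGTANYWHERE selects the checked write barrier, which also handles stack destinations.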
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
beforeStmt = oldLastStmt->GetNextStmt();
            }

            impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt);
            structVal->AsOp()->gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}

//------------------------------------------------------------------------
// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
//    structHnd        - The class handle for the struct type of interest.
//    pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                       type, set to the SIMD base JIT type
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known,
//    call structSizeMightRepresentSIMDType to determine if this api needs to be called.

var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType)
{
    assert(structHnd != NO_CLASS_HANDLE);

    var_types structType = TYP_STRUCT;

#ifdef FEATURE_SIMD
    if (supportSIMDTypes())
    {
        const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);

        // Don't bother if the struct contains GC references or byrefs, it can't be a SIMD type.
        if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0)
        {
            unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

            if (structSizeMightRepresentSIMDType(originalSize))
            {
                unsigned int sizeBytes;
                CorInfoType  simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
                if (simdBaseJitType != CORINFO_TYPE_UNDEF)
                {
                    assert(sizeBytes == originalSize);
                    structType = getSIMDTypeForSize(sizeBytes);
                    if (pSimdBaseJitType != nullptr)
                    {
                        *pSimdBaseJitType = simdBaseJitType;
                    }
                    // Also indicate that we use floating point registers.
                    compFloatingPointUsed = true;
                }
            }
        }
    }
#endif // FEATURE_SIMD

    return structType;
}

//------------------------------------------------------------------------
//  Compiler::impNormStructVal: Normalize a struct value
//
//  Arguments:
//     structVal          - the node we are going to normalize
//     structHnd          - the class handle for the node
//     curLevel           - the current stack level
//     forceNormalization - Force the creation of an OBJ node (default is false).
//
// Notes:
//     Given struct value 'structVal', make sure it is 'canonical', that is
//     it is either:
//     - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8)
//     - an OBJ or a MKREFANY node, or
//     - a node (e.g. GT_INDEX) that will be morphed.
//    If the node is a CALL or RET_EXPR, a copy will be made to a new temp.
//
GenTree* Compiler::impNormStructVal(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }
    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
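        // For illustration (a sketch, not an exhaustive list of the cases
        // below): a TYP_STRUCT LCL_VAR gets wrapped as OBJ(ADDR(LCL_VAR)),
        // while a CALL or RET_EXPR is first spilled to a fresh temp
        // ("makeTemp") so that its address can be taken.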
case GT_RETURN:
            break;
        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            structVal->AsCall()->gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_RET_EXPR:
            structVal->AsRetExpr()->gtRetClsHnd = structHnd;
            makeTemp                            = true;
            break;

        case GT_ARGPLACE:
            structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd;
            break;

        case GT_INDEX:
            // This will be transformed to an OBJ later.
            alreadyNormalized                       = true;
            structVal->AsIndex()->gtStructElemClass = structHnd;
            structVal->AsIndex()->gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
            break;

        case GT_FIELD:
            // Wrap it in a GT_OBJ, if needed.
            structVal->gtType = structType;
            if ((structType == TYP_STRUCT) || forceNormalization)
            {
                structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            }
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            FALLTHROUGH;

        case GT_OBJ:
        case GT_BLK:
        case GT_ASG:
            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;
            break;

        case GT_IND:
            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
        case GT_HWINTRINSIC:
            assert(structVal->gtType == structType);
            assert(varTypeIsSIMD(structVal) ||
                   HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId()));
            break;
#endif

        case GT_COMMA:
        {
            // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
            GenTree* blockNode = structVal->AsOp()->gtOp2;
            assert(blockNode->gtType == structType);

            // Is this GT_COMMA(op1, GT_COMMA())?
            GenTree* parent = structVal;
            if (blockNode->OperGet() == GT_COMMA)
            {
                // Find the last node in the comma chain.
                do
                {
                    assert(blockNode->gtType == structType);
                    parent    = blockNode;
                    blockNode = blockNode->AsOp()->gtOp2;
                } while (blockNode->OperGet() == GT_COMMA);
            }

            if (blockNode->OperGet() == GT_FIELD)
            {
                // If we have a GT_FIELD then wrap it in a GT_OBJ.
                blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
            }

#ifdef FEATURE_SIMD
            if (blockNode->OperIsSimdOrHWintrinsic())
            {
                parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
                alreadyNormalized     = true;
            }
            else
#endif
            {
                noway_assert(blockNode->OperIsBlk());

                // Sink the GT_COMMA below the blockNode addr.
                // That is GT_COMMA(op1, op2=blockNode) is transformed into
                // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
                //
                // In case of a chained GT_COMMA case, we sink the last
                // GT_COMMA below the blockNode addr.
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1;
                assert(blockNodeAddr->gtType == TYP_BYREF);
                GenTree* commaNode       = parent;
                commaNode->gtType        = TYP_BYREF;
                commaNode->AsOp()->gtOp2 = blockNodeAddr;
                blockNode->AsOp()->gtOp1 = commaNode;
                if (parent == structVal)
                {
                    structVal = blockNode;
                }
                alreadyNormalized = true;
            }
        }
        break;

        default:
            noway_assert(!"Unexpected node in impNormStructVal()");
            break;
    }
    structVal->gtType = structType;

    if (!alreadyNormalized || forceNormalization)
    {
        if (makeTemp)
        {
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

            impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

            // The structVal is now the temp itself

            structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
            structVal = structLcl;
        }
        if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk())
        {
            // Wrap it in a GT_OBJ
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
        }
    }

    if (structLcl != nullptr)
    {
        // An OBJ on an ADDR(LCL_VAR) can never raise an exception
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum()))
        {
            structVal->gtFlags &= ~GTF_GLOB_REF;
        }
    }
    else if (structVal->OperIsBlk())
    {
        // In general an OBJ is an indirection and could raise an exception.
        structVal->gtFlags |= GTF_EXCEPT;
    }
    return structVal;
}

/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    bool*                   pRuntimeLookup /* = NULL */,
                                    bool                    mustRestoreHandle /* = false */,
                                    bool                    importParent /* = false */)
{
    assert(!fgGlobalMorph);

    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

    if (pRuntimeLookup)
    {
        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
    }

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        switch (embedInfo.handleType)
        {
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(
                    (CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_FIELD:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
                break;

            default:
                break;
        }
    }

    // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
    GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                                      embedInfo.compileTimeHandle);

    // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
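    // (Illustrative example: in shared generic code, "typeof(T)" cannot be an
    // embedded constant handle, so the tree built here ends up being a
    // dictionary access off the generic context rather than a GT_CNS_INT.)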
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } //------------------------------------------------------------------------ // impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible to // to be profiled and then optimized with PGO data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper eligible to be profiled // bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } 
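// Note: the four helpers checked above correspond to the IL "isinst" and
// "castclass" opcodes. Under instrumentation the probe records the classes
// actually observed, so that an optimized rebuild can, for example, expand
// the cast with a fast path for the single dominant class.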
//------------------------------------------------------------------------ // impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might // have profile data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper with potential profile data // bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. 
pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. */ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // It's available only via the run-time helper function if (pRuntimeLookup->indirections == CORINFO_USEHELPER) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pLookup->lookupKind); } #endif return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle); } // Slot pointer GenTree* slotPtrTree = ctxTree; if (pRuntimeLookup->testForNull) { slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } GenTree* indOffTree = nullptr; GenTree* lastIndOfTree = nullptr; // Applied repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } // The last indirection could be subject to a size check (dynamic dictionary expansion) bool isLastIndirectionWithSizeCheck = ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)); if (i != 0) { slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!isLastIndirectionWithSizeCheck) { slotPtrTree->gtFlags |= GTF_IND_INVARIANT; } } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree); } if (pRuntimeLookup->offsets[i] != 0) { if (isLastIndirectionWithSizeCheck) { lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } // No null test required if (!pRuntimeLookup->testForNull) { if (pRuntimeLookup->indirections == 0) { return slotPtrTree; } slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!pRuntimeLookup->testForFixup) { return slotPtrTree; } impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets slot = impImplicitIorI4Cast(slot, TYP_INT); // 
Use a GT_AND to check for the lowest bit and indirect if it is set GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1)); GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0)); // slot = GT_IND(slot - 1) slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL)); GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add); indir->gtFlags |= GTF_IND_NONFAULTING; indir->gtFlags |= GTF_IND_INVARIANT; slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } assert(pRuntimeLookup->indirections != 0); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1")); // Extract the handle GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING; // Call the helper // - Setup argNode with the pointer to the signature returned by the lookup GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); // Check for null and possibly call helper GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL)); GenTree* handleForResult = gtCloneExpr(handleForNullCheck); GenTree* result = nullptr; if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { // Dynamic dictionary expansion support assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0)); // sizeValue = dictionary[pRuntimeLookup->sizeOffset] GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL); GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset); GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset); sizeValue->gtFlags |= GTF_IND_NONFAULTING; // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i] GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL); GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue); // revert null check condition. nullCheck->ChangeOperUnchecked(GT_EQ); // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle). // Add checks and the handle as call arguments, indirect call transformer will handle this. helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs); result = helperCall; addExpRuntimeLookupCandidate(helperCall); } else { GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall); result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck); } unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } /****************************************************************************** * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. 
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
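        // After this, the new block holds a single statement of the form
        //     tmp = CATCH_ARG
        // and the handler itself sees "tmp" on its entry stack instead of the
        // raw catch argument.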
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argAsg = gtNewTempAssign(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; Statement* argStmt; if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. // TODO-DEBUGINFO: Previous code always set stack as non-empty // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); argStmt = gtNewStmt(argAsg, impCurStmtDI); } else { argStmt = gtNewStmt(argAsg); } fgInsertStmtAtEnd(newBlk, argStmt); } impPushOnStack(arg, typeInfo(TI_REF, clsHnd)); return hndBlk; } /***************************************************************************** * * Given a tree, clone it. *pClone is set to the cloned tree. * Returns the original tree if the cloning was easy, * else returns the temp to which the tree had to be spilled to. * If the tree has side-effects, it will be spilled to a temp. */ GenTree* Compiler::impCloneExpr(GenTree* tree, GenTree** pClone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(tree, true); if (clone) { *pClone = clone; return tree; } } /* Store the operand in a temp and return the temp */ unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from // the lclVar AFTER calling impAssignTempGen(). impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); return gtNewLclvNode(temp, type); } //------------------------------------------------------------------------ // impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the // specified IL offset and 'is call' bit, using the current stack to determine // whether to set the 'stack empty' bit. // // Arguments: // offs - the IL offset for the DebugInfo // isCall - whether the created DebugInfo should have the IsCall bit set // // Return Value: // The DebugInfo instance. // DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); bool isStackEmpty = verCurrentState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } //------------------------------------------------------------------------ // impCurStmtOffsSet: Set the "current debug info" to attach to statements that // we are generating next. // // Arguments: // offs - the IL offset // // Remarks: // This function will be called in the main IL processing loop when it is // determined that we have reached a location in the IL stream for which we // want to report debug information. This is the main way we determine which // statements to report debug info for to the EE: for other statements, they // will have no debug information attached. 
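//    As an illustration: importing "x = F(y);" may append several statements
//    (argument spills plus the call itself); only the statement at the
//    reported boundary carries an IL mapping, the spill statements do not
//    get one of their own.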
//
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
{
    if (offs == BAD_IL_OFFSET)
    {
        impCurStmtDI = DebugInfo(compInlineContext, ILLocation());
    }
    else
    {
        impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false);
    }
}

//------------------------------------------------------------------------
// impCanSpillNow: check whether it is possible to spill all values from the
//    eval stack to local variables.
//
// Arguments:
//    prevOpcode - last importer opcode
//
// Return Value:
//    true if it is legal, false if it could be a sequence that we do not want to divide.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
{
    // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
    // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed.
    return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
}

/*****************************************************************************
 *
 *  Remember the instr offset for the statements
 *
 *  When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs),
 *  if the append was done because of a partial stack spill,
 *  as some of the trees corresponding to code up to impCurOpcOffs might
 *  still be sitting on the stack.
 *  So we delay calling of SetLastILOffset() until impNoteLastILoffs().
 *  This should be called when an opcode finally/explicitly causes
 *  impAppendTree(tree) to be called (as opposed to being called because of
 *  a spill caused by the opcode)
 */

#ifdef DEBUG

void Compiler::impNoteLastILoffs()
{
    if (impLastILoffsStmt == nullptr)
    {
        // We should have added a statement for the current basic block
        // Is this assert correct ?

        assert(impLastStmt);

        impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
    }
    else
    {
        impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs);
        impLastILoffsStmt = nullptr;
    }
}

#endif // DEBUG

/*****************************************************************************
 * We don't create any GenTree (excluding spills) for a branch.
 * For debugging info, we need a placeholder so that we can note
 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
 */

void Compiler::impNoteBranchOffs()
{
    if (opts.compDbgCode)
    {
        impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
    }
}

/*****************************************************************************
 * Locate the next stmt boundary for which we need to record info.
 * We will have to spill the stack at such boundaries if it is not
 * already empty.
 * Returns the next stmt boundary (after the start of the block)
 */

unsigned Compiler::impInitBlockLineInfo()
{
    /* Assume the block does not correspond with any IL offset. This prevents
       us from reporting extra offsets. Extra mappings can cause confusing
       stepping, especially if the extra mapping is a jump-target, and the
       debugger does not ignore extra mappings, but instead rewinds to the
       nearest known offset */

    impCurStmtOffsSet(BAD_IL_OFFSET);

    IL_OFFSET blockOffs = compCurBB->bbCodeOffs;

    if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
    {
        impCurStmtOffsSet(blockOffs);
    }

    /* Always report IL offset 0 or some tests get confused.
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
We know if it can be
 * changed to TYP_I_IMPL only at the point where we use it
 */

/* static */
void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
{
    if (tree1->IsLocalAddrExpr() != nullptr)
    {
        tree1->gtType = TYP_I_IMPL;
    }

    if (tree2 && (tree2->IsLocalAddrExpr() != nullptr))
    {
        tree2->gtType = TYP_I_IMPL;
    }
}

/*****************************************************************************
 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
 * to make that an explicit cast in our trees, so any implicit casts that
 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
 * turned into explicit casts here.
 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
 */

GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
{
    var_types currType   = genActualType(tree->gtType);
    var_types wantedType = genActualType(dstTyp);

    if (wantedType != currType)
    {
        // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
        if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
        {
            if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0)))
            {
                tree->gtType = TYP_I_IMPL;
            }
        }
#ifdef TARGET_64BIT
        else if (varTypeIsI(wantedType) && (currType == TYP_INT))
        {
            // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
            tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
        }
        else if ((wantedType == TYP_INT) && varTypeIsI(currType))
        {
            // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
            tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
        }
#endif // TARGET_64BIT
    }

    return tree;
}

/*****************************************************************************
 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
 * but we want to make that an explicit cast in our trees, so any implicit casts
 * that exist in the IL are turned into explicit casts here.
 */

GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
{
    if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
    {
        tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
    }

    return tree;
}

//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
//    with a GT_COPYBLK node.
//
// Arguments:
//    sig - The InitializeArray signature.
//
// Return Value:
//    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
//    nullptr otherwise.
//
// Notes:
//    The function recognizes the following IL pattern:
//      ldc <length> or a list of ldc <lower bound>/<length>
//      newarr or newobj
//      dup
//      ldtoken <field handle>
//      call InitializeArray
//    The lower bounds need not be constant except when the array rank is 1.
//    The function recognizes all kinds of arrays thus enabling a small runtime
//    such as CoreRT to skip providing an implementation for InitializeArray.

GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
    assert(sig->numArgs == 2);

    GenTree* fieldTokenNode = impStackTop(0).val;
    GenTree* arrayLocalNode = impStackTop(1).val;

    //
    // Verify that the field token is known and valid.  Note that it's also
    // possible for the token to come from reflection, in which case we cannot do
    // the optimization and must therefore revert to calling the helper.  You can
    // see an example of this in bvt\DynIL\initarray2.exe (in Main).
    //

    // Check to see if the ldtoken helper call is what we see here.
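    // Concretely, after importing "ldtoken <field>" the top of the stack is
    //     CALL helper CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD(<field handle>)
    // which is the shape matched below.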
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
        (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
    {
        return nullptr;
    }

    // Strip helper call away
    fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();

    if (fieldTokenNode->gtOper == GT_IND)
    {
        fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
    }

    // Check for constant
    if (fieldTokenNode->gtOper != GT_CNS_INT)
    {
        return nullptr;
    }

    CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
    if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
    {
        return nullptr;
    }

    //
    // We need to get the number of elements in the array and the size of each element.
    // We verify that the newarr statement is exactly what we expect it to be.
    // If it's not then we just return NULL and we don't optimize this call
    //

    // It is possible that we don't have any statements in the block yet.
    if (impLastStmt == nullptr)
    {
        return nullptr;
    }

    //
    // We start by looking at the last statement, making sure it's an assignment, and
    // that the target of the assignment is the array passed to InitializeArray.
    //
    GenTree* arrayAssignment = impLastStmt->GetRootNode();
    if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) ||
        (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() !=
                                                   arrayLocalNode->AsLclVarCommon()->GetLclNum()))
    {
        return nullptr;
    }

    //
    // Make sure that the object being assigned is a helper call.
    //

    GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2;
    if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER))
    {
        return nullptr;
    }

    //
    // Verify that it is one of the new array helpers.
    //

    bool isMDArray = false;

    if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
        newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN
        && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
            )
    {
        if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR))
        {
            return nullptr;
        }

        isMDArray = true;
    }

    CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle;

    //
    // Make sure we found a compile time handle to the array
    //

    if (!arrayClsHnd)
    {
        return nullptr;
    }

    unsigned rank = 0;
    S_UINT32 numElements;

    if (isMDArray)
    {
        rank = info.compCompHnd->getArrayRank(arrayClsHnd);

        if (rank == 0)
        {
            return nullptr;
        }

        GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs;
        assert(tokenArg != nullptr);
        GenTreeCall::Use* numArgsArg = tokenArg->GetNext();
        assert(numArgsArg != nullptr);
        GenTreeCall::Use* argsArg = numArgsArg->GetNext();
        assert(argsArg != nullptr);

        //
        // The number of arguments should be a constant between 1 and 64. The rank can't be 0
        // so at least one length must be present and the rank can't exceed 32 so there can
        // be at most 64 arguments - 32 lengths and 32 lower bounds.
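        // (E.g. a rank-2 array allocated with explicit bounds passes 4
        // arguments, 2 lower bounds plus 2 lengths, while a plain
        // "new int[3,4]"-style allocation passes only the 2 lengths.)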
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
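        // (For the regular helpers the call shape is helper(arrayClsHnd, numElements);
        // the readytorun variant handled below takes the length as its first argument.)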
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
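    // As with impInitializeArrayIntrinsic above, anything other than the
    // ldtoken helper call (e.g. a token that came from reflection) makes us
    // give up and leave the call to the actual runtime implementation.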
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) ||
        (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
    {
        return nullptr;
    }

    // Strip helper call away
    fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode();
    if (fieldTokenNode->gtOper == GT_IND)
    {
        fieldTokenNode = fieldTokenNode->AsOp()->gtOp1;
    }

    // Check for constant
    if (fieldTokenNode->gtOper != GT_CNS_INT)
    {
        return nullptr;
    }

    CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle;
    if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
    {
        return nullptr;
    }

    CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken);

    CORINFO_CLASS_HANDLE fieldClsHnd;
    var_types            fieldElementType =
        JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd));
    unsigned totalFieldSize;

    // Most static initialization data fields are of some structure, but it is possible for them to be of various
    // primitive types as well
    if (fieldElementType == var_types::TYP_STRUCT)
    {
        totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd);
    }
    else
    {
        totalFieldSize = genTypeSize(fieldElementType);
    }

    // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom()
    CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0];
    if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF)
    {
        return nullptr;
    }

    const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd);
    assert(targetElemSize != 0);

    const unsigned count = totalFieldSize / targetElemSize;
    if (count == 0)
    {
        return nullptr;
    }

    void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize);
    if (!data)
    {
        return nullptr;
    }

    //
    // Ready to commit to the work
    //

    impPopStack();

    // Turn count and pointer value into constants.
    GenTree* lengthValue  = gtNewIconNode(count, TYP_INT);
    GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR);

    // Construct ReadOnlySpan<T> to return.
    CORINFO_CLASS_HANDLE spanHnd     = sig->retTypeClass;
    unsigned             spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>"));
    lvaSetStruct(spanTempNum, spanHnd, false);

    CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0);
    CORINFO_FIELD_HANDLE lengthFieldHnd  = info.compCompHnd->getFieldInClass(spanHnd, 1);

    GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0);
    pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd));
    GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue);

    GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE);
    lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd));
    GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue);

    // Now append a few statements that initialize the span
    impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
    impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

    // And finally create a tree that points at the span.
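    // The temp now holds a fully initialized ReadOnlySpan<T>, laid out as
    //     [0]                   TYP_BYREF pointer to the static data blob
    //     [TARGET_POINTER_SIZE] TYP_INT   element count
    // and we simply hand back a use of that local.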
    return impCreateLocalNode(spanTempNum DEBUGARG(0));
}

//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
//    newobjThis - for constructor calls, the tree for the newly allocated object
//    clsHnd - handle for the intrinsic method's class
//    method - handle for the intrinsic method
//    sig - signature of the intrinsic method
//    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
//    memberRef - the token for the intrinsic method
//    readonlyCall - true if call has a readonly prefix
//    tailCall - true if call is in tail position
//    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
//       if call is not constrained
//    constraintCallThisTransform -- this transform to apply for a constrained call
//    pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
//       for "traditional" jit intrinsics
//    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
//       that is amenable to special downstream optimization opportunities
//
// Returns:
//    IR tree to use in place of the call, or nullptr if the jit should treat
//    the intrinsic call like a normal call.
//
//    pIntrinsicName set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
//    isSpecial set true if the expansion is subject to special
//    optimizations later in the jit processing
//
// Notes:
//    On success the IR tree may be a call to a different method or an inline
//    sequence. If it is a call, then the intrinsic processing here is responsible
//    for handling all the special cases, as upon return to impImportCall
//    expanded intrinsics bypass most of the normal call processing.
//
//    Intrinsics are generally not recognized in minopts and debug codegen.
//
//    However, certain traditional intrinsics are identified as "must expand"
//    if there is no fallback implementation to invoke; these must be handled
//    in all codegen modes.
//
//    New style intrinsics (where the fallback implementation is in IL) are
//    identified as "must expand" if they are invoked from within their
//    own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
                                CORINFO_CLASS_HANDLE    clsHnd,
                                CORINFO_METHOD_HANDLE   method,
                                CORINFO_SIG_INFO*       sig,
                                unsigned                methodFlags,
                                int                     memberRef,
                                bool                    readonlyCall,
                                bool                    tailCall,
                                CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
                                NamedIntrinsic*         pIntrinsicName,
                                bool*                   isSpecialIntrinsic)
{
    assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0);

    bool           mustExpand = false;
    bool           isSpecial  = false;
    NamedIntrinsic ni         = NI_Illegal;

    if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
    {
        // The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
        mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));

        ni = lookupNamedIntrinsic(method);

        // We specially support the following on all platforms to allow for dead
        // code optimization and to more generally support recursive intrinsics.
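        // For example, `Sse41.IsSupported` on a target without the feature maps to
        // NI_IsSupported_False and folds to a constant below, so the guarded code
        // can be dropped as dead code.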
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
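                    // e.g. RuntimeHelpers.IsKnownConstant(42) or IsKnownConstant("s") lands here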
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
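            // (the index feeds both the bounds check and the address arithmetic, and the
            // span address feeds both the _length load and the _pointer load)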
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
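                // e.g. `typeof(int) == typeof(string)` can fold all the way to `false`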
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
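            // (marking the node as a load barrier lets codegen skip emitting the fence
            // instruction while the node still blocks reordering within the compiler)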
            if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier)
            {
                op1->gtFlags |= GTF_MEMORYBARRIER_LOAD;
            }

            retNode = op1;
            break;
        }

#ifdef FEATURE_HW_INTRINSICS
        case NI_System_Math_FusedMultiplyAdd:
        {
#ifdef TARGET_XARCH
            if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes())
            {
                assert(varTypeIsFloating(callType));

                // We are constructing a chain of intrinsics similar to:
                //    return FMA.MultiplyAddScalar(
                //        Vector128.CreateScalarUnsafe(x),
                //        Vector128.CreateScalarUnsafe(y),
                //        Vector128.CreateScalarUnsafe(z)
                //    ).ToScalar();

                GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe,
                                                        callJitType, 16);
                GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe,
                                                        callJitType, 16);
                GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe,
                                                        callJitType, 16);
                GenTree* res =
                    gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16);

                retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16);
                break;
            }
#elif defined(TARGET_ARM64)
            if (compExactlyDependsOn(InstructionSet_AdvSimd))
            {
                assert(varTypeIsFloating(callType));

                // We are constructing a chain of intrinsics similar to:
                //    return AdvSimd.FusedMultiplyAddScalar(
                //        Vector64.Create{ScalarUnsafe}(z),
                //        Vector64.Create{ScalarUnsafe}(y),
                //        Vector64.Create{ScalarUnsafe}(x)
                //    ).ToScalar();

                NamedIntrinsic createVector64 =
                    (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe;

                constexpr unsigned int simdSize = 8;

                GenTree* op3 =
                    gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
                GenTree* op2 =
                    gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);
                GenTree* op1 =
                    gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize);

                // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3
                // while Math{F}.FusedMultiplyAdd(op1,op2,op3) corresponds to op1 * op2 + op3
                retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar,
                                                   callJitType, simdSize);

                retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize);
                break;
            }
#endif

            // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently
            // requires more extensive changes to valuenum to support methods with 3 operands

            // We want to generate a GT_INTRINSIC node in the case the call can't be treated as
            // a target intrinsic so that we can still benefit from CSE and constant folding.
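            // (reaching the break below with retNode still nullptr leaves this as an
            // ordinary call to the managed Math{F}.FusedMultiplyAdd implementation)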
            break;
        }
#endif // FEATURE_HW_INTRINSICS

        case NI_System_Math_Abs:
        case NI_System_Math_Acos:
        case NI_System_Math_Acosh:
        case NI_System_Math_Asin:
        case NI_System_Math_Asinh:
        case NI_System_Math_Atan:
        case NI_System_Math_Atanh:
        case NI_System_Math_Atan2:
        case NI_System_Math_Cbrt:
        case NI_System_Math_Ceiling:
        case NI_System_Math_Cos:
        case NI_System_Math_Cosh:
        case NI_System_Math_Exp:
        case NI_System_Math_Floor:
        case NI_System_Math_FMod:
        case NI_System_Math_ILogB:
        case NI_System_Math_Log:
        case NI_System_Math_Log2:
        case NI_System_Math_Log10:
#ifdef TARGET_ARM64
        // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible
        // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant
        // so we can then emit maxss/minss and avoid NaN/-0.0 handling
        case NI_System_Math_Max:
        case NI_System_Math_Min:
#endif
        case NI_System_Math_Pow:
        case NI_System_Math_Round:
        case NI_System_Math_Sin:
        case NI_System_Math_Sinh:
        case NI_System_Math_Sqrt:
        case NI_System_Math_Tan:
        case NI_System_Math_Tanh:
        case NI_System_Math_Truncate:
        {
            retNode = impMathIntrinsic(method, sig, callType, ni, tailCall);
            break;
        }

        case NI_System_Array_Clone:
        case NI_System_Collections_Generic_Comparer_get_Default:
        case NI_System_Collections_Generic_EqualityComparer_get_Default:
        case NI_System_Object_MemberwiseClone:
        case NI_System_Threading_Thread_get_CurrentThread:
        {
            // Flag for later handling.
            isSpecial = true;
            break;
        }

        case NI_System_Object_GetType:
        {
            JITDUMP("\n impIntrinsic: call to Object.GetType\n");
            GenTree* op1 = impStackTop(0).val;

            // If we're calling GetType on a boxed value, just get the type directly.
            if (op1->IsBoxedValue())
            {
                JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");

                // Try and clean up the box. Obtain the handle we
                // were going to pass to the newobj.
                GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);

                if (boxTypeHandle != nullptr)
                {
                    // Note we don't need to play the TYP_STRUCT games here like
                    // we do for LDTOKEN since the return value of this operator is Type,
                    // not RuntimeTypeHandle.
                    impPopStack();
                    GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle);
                    GenTree*          runtimeType =
                        gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
                    retNode = runtimeType;
                }
            }

            // If we have a constrained callvirt with a "box this" transform
            // we know we have a value class and hence an exact type.
            //
            // If so, instead of boxing and then extracting the type, just
            // construct the type directly.
            if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
                (constraintCallThisTransform == CORINFO_BOX_THIS))
            {
                // Ensure this is one of the simple box cases (in particular, rule out nullables).
                const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
                const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);

                if (isSafeToOptimize)
                {
                    JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
                    impPopStack();
                    GenTree* typeHandleOp =
                        impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */);
                    if (typeHandleOp == nullptr)
                    {
                        assert(compDonotInline());
                        return nullptr;
                    }
                    GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp);
                    GenTree*          runtimeType =
                        gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
                    retNode = runtimeType;
                }
            }

#ifdef DEBUG
            if (retNode != nullptr)
            {
                JITDUMP("Optimized result for call to GetType is\n");
                if (verbose)
                {
                    gtDispTree(retNode);
                }
            }
#endif

            // Else expand as an intrinsic, unless the call is constrained,
            // in which case we defer expansion to allow impImportCall to do the
            // special constraint processing.
            if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
            {
                JITDUMP("Expanding as special intrinsic\n");
                impPopStack();
                op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method);

                // Set the CALL flag to indicate that the operator is implemented by a call.
                // Set also the EXCEPTION flag because the native implementation of
                // NI_System_Object_GetType intrinsic can throw NullReferenceException.
                op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
                retNode = op1;

                // Might be further optimizable, so arrange to leave a mark behind
                isSpecial = true;
            }

            if (retNode == nullptr)
            {
                JITDUMP("Leaving as normal call\n");
                // Might be further optimizable, so arrange to leave a mark behind
                isSpecial = true;
            }

            break;
        }

        case NI_System_Array_GetLength:
        case NI_System_Array_GetLowerBound:
        case NI_System_Array_GetUpperBound:
        {
            // System.Array.GetLength(Int32) method:
            //     public int GetLength(int dimension)
            // System.Array.GetLowerBound(Int32) method:
            //     public int GetLowerBound(int dimension)
            // System.Array.GetUpperBound(Int32) method:
            //     public int GetUpperBound(int dimension)
            //
            // Only implement these as intrinsics for multi-dimensional arrays.
            // Only handle constant dimension arguments.

            GenTree* gtDim = impStackTop().val;
            GenTree* gtArr = impStackTop(1).val;

            if (gtDim->IsIntegralConst())
            {
                bool                 isExact   = false;
                bool                 isNonNull = false;
                CORINFO_CLASS_HANDLE arrCls    = gtGetClassHandle(gtArr, &isExact, &isNonNull);
                if (arrCls != NO_CLASS_HANDLE)
                {
                    unsigned rank = info.compCompHnd->getArrayRank(arrCls);
                    if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls))
                    {
                        // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument
                        // is `int` sized.
                        INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue();
                        assert((unsigned int)dimValue == dimValue);
                        unsigned dim = (unsigned int)dimValue;
                        if (dim < rank)
                        {
                            // This is now known to be a multi-dimension array with a constant dimension
                            // that is in range; we can expand it as an intrinsic.

                            impPopStack().val; // Pop the dim and array object; we already have a pointer to them.
                            impPopStack().val;

                            // Make sure there are no global effects in the array (such as it being a function
                            // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the
                            // GetUpperBound case we need the cloned object, since we refer to the array
                            // object twice. In the other cases, we don't need to clone.
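                            // e.g. for `new int[3,4]` (zero lower bounds), GetUpperBound(1)
                            // is lowerBound(1) + length(1) - 1 = 0 + 4 - 1 = 3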
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
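                    // e.g. a 64-bit ReverseEndianness call on a 32-bit target takes this
                    // path and remains a real call into the managed fallback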
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
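//    The mapping is purely name based (namespace, class, and method name), so renames
//    on the managed side must be mirrored in the string comparisons below.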
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
            {
                result = NI_System_ReadOnlySpan_get_Item;
            }
        }
        else if (strcmp(className, "EETypePtr") == 0)
        {
            if (strcmp(methodName, "EETypePtrOf") == 0)
            {
                result = NI_System_EETypePtr_EETypePtrOf;
            }
        }
    }
    else if (strcmp(namespaceName, "System.Threading") == 0)
    {
        if (strcmp(className, "Thread") == 0)
        {
            if (strcmp(methodName, "get_CurrentThread") == 0)
            {
                result = NI_System_Threading_Thread_get_CurrentThread;
            }
            else if (strcmp(methodName, "get_ManagedThreadId") == 0)
            {
                result = NI_System_Threading_Thread_get_ManagedThreadId;
            }
        }
        else if (strcmp(className, "Interlocked") == 0)
        {
#ifdef TARGET_ARM64
            // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239).
            if (strcmp(methodName, "And") == 0)
            {
                result = NI_System_Threading_Interlocked_And;
            }
            else if (strcmp(methodName, "Or") == 0)
            {
                result = NI_System_Threading_Interlocked_Or;
            }
#endif
            if (strcmp(methodName, "CompareExchange") == 0)
            {
                result = NI_System_Threading_Interlocked_CompareExchange;
            }
            else if (strcmp(methodName, "Exchange") == 0)
            {
                result = NI_System_Threading_Interlocked_Exchange;
            }
            else if (strcmp(methodName, "ExchangeAdd") == 0)
            {
                result = NI_System_Threading_Interlocked_ExchangeAdd;
            }
            else if (strcmp(methodName, "MemoryBarrier") == 0)
            {
                result = NI_System_Threading_Interlocked_MemoryBarrier;
            }
            else if (strcmp(methodName, "ReadMemoryBarrier") == 0)
            {
                result = NI_System_Threading_Interlocked_ReadMemoryBarrier;
            }
        }
    }
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
    else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
    {
        if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
        {
            result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
        }
    }
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
    else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
    {
        if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
        {
            result = NI_System_Collections_Generic_EqualityComparer_get_Default;
        }
        else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
        {
            result = NI_System_Collections_Generic_Comparer_get_Default;
        }
    }
    else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0))
    {
        if (strcmp(methodName, "PopCount") == 0)
        {
            result = NI_System_Numerics_BitOperations_PopCount;
        }
    }
#ifdef FEATURE_HW_INTRINSICS
    else if (strcmp(namespaceName, "System.Numerics") == 0)
    {
        CORINFO_SIG_INFO sig;
        info.compCompHnd->getMethodSig(method, &sig);

        int sizeOfVectorT = getSIMDVectorRegisterByteLength();

        result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT);
    }
#endif // FEATURE_HW_INTRINSICS
    else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) &&
             (strcmp(className, "RuntimeHelpers") == 0))
    {
        if (strcmp(methodName, "CreateSpan") == 0)
        {
            result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan;
        }
        else if (strcmp(methodName, "InitializeArray") == 0)
        {
            result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray;
        }
        else if (strcmp(methodName, "IsKnownConstant") == 0)
        {
            result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant;
        }
    }
    else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
    {
        // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled
        // so we can specially handle IsSupported and recursive calls.

        // This is required to appropriately handle the intrinsics on platforms
        // which don't support them.
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
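        // (GT_ARR_ELEM stores the element size as an unsigned char - see the
        // static_cast below when the node is constructed)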
        return nullptr;
    }

    GenTree* val = nullptr;

    if (intrinsicName == NI_Array_Set)
    {
        // Assignment of a struct is more work, and there are more gets than sets.
        if (elemType == TYP_STRUCT)
        {
            return nullptr;
        }

        val = impPopStack().val;
        assert(genActualType(elemType) == genActualType(val->gtType) ||
               (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
               (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
               (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
    }

    noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);

    GenTree* inds[GT_ARR_MAX_RANK];
    for (unsigned k = rank; k > 0; k--)
    {
        inds[k - 1] = impPopStack().val;
    }

    GenTree* arr = impPopStack().val;
    assert(arr->gtType == TYP_REF);

    GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
                                                              static_cast<unsigned char>(arrayElemSize), elemType,
                                                              &inds[0]);

    if (intrinsicName != NI_Array_Address)
    {
        if (varTypeIsStruct(elemType))
        {
            arrElem = gtNewObjNode(sig->retTypeClass, arrElem);
        }
        else
        {
            arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
        }
    }

    if (intrinsicName == NI_Array_Set)
    {
        assert(val != nullptr);
        return gtNewAssignNode(arrElem, val);
    }
    else
    {
        return arrElem;
    }
}

//------------------------------------------------------------------------
// impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call
//
// Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization,
// if the object to keep alive is a GT_BOX, removes its side effects and
// uses the address of a local (copied from the box's source if needed)
// as the operand for GT_KEEPALIVE. For the BOX optimization, if the class
// of the box has no GC fields, a GT_NOP is returned.
//
// Arguments:
//    objToKeepAlive - the intrinsic call's argument
//
// Return Value:
//    The imported GT_KEEPALIVE or GT_NOP - see description.
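//    e.g. GC.KeepAlive(obj) ordinarily imports as GT_KEEPALIVE(obj); the BOX handling
//    below covers the optimized special cases.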
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
                        //
                        ThisInitState origTIS           = verCurrentState.thisInitialized;
                        verCurrentState.thisInitialized = TIS_Top;
                        impVerifyEHBlock(block, true);
                        verCurrentState.thisInitialized = origTIS;
                    }
                }
            }
        }
    }
    else
    {
        assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
    }

    return true;
}

/*****************************************************************************
 * 'logMsg' is true if a log message needs to be logged. false if the caller has
 *   already logged it (presumably in a more detailed fashion than done here)
 */

void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
{
    block->bbJumpKind = BBJ_THROW;
    block->bbFlags |= BBF_FAILED_VERIFICATION;
    block->bbFlags &= ~BBF_IMPORTED;

    impCurStmtOffsSet(block->bbCodeOffs);

    // Clear the statement list as it exists so far; we're only going to have a verification exception.
    impStmtList = impLastStmt = nullptr;

#ifdef DEBUG
    if (logMsg)
    {
        JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
                block->bbCodeOffs, block->bbCodeOffsEnd));
        if (verbose)
        {
            printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
        }
    }

    if (JitConfig.DebugBreakOnVerificationFailure())
    {
        DebugBreak();
    }
#endif

    impBeginTreeList();

    // if the stack is non-empty evaluate all the side-effects
    if (verCurrentState.esStackDepth > 0)
    {
        impEvalSideEffects();
    }
    assert(verCurrentState.esStackDepth == 0);

    GenTree* op1 =
        gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs)));
    // verCurrentState.esStackDepth = 0;
    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

    // The inliner is not able to handle methods that require a throw block, so
    // make sure this method never gets inlined.
    info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
}

/*****************************************************************************
 *
 */
void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
{
    verResetCurrentState(block, &verCurrentState);
    verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));

#ifdef DEBUG
    impNoteLastILoffs(); // Remember at which BC offset the tree was finished
#endif                   // DEBUG
}

/******************************************************************************/
typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
{
    assert(ciType < CORINFO_TYPE_COUNT);

    typeInfo tiResult;
    switch (ciType)
    {
        case CORINFO_TYPE_STRING:
        case CORINFO_TYPE_CLASS:
            tiResult = verMakeTypeInfo(clsHnd);
            if (!tiResult.IsType(TI_REF))
            { // type must be consistent with element type
                return typeInfo();
            }
            break;

#ifdef TARGET_64BIT
        case CORINFO_TYPE_NATIVEINT:
        case CORINFO_TYPE_NATIVEUINT:
            if (clsHnd)
            {
                // If we have more precise information, use it
                return verMakeTypeInfo(clsHnd);
            }
            else
            {
                return typeInfo::nativeInt();
            }
            break;
#endif // TARGET_64BIT

        case CORINFO_TYPE_VALUECLASS:
        case CORINFO_TYPE_REFANY:
            tiResult = verMakeTypeInfo(clsHnd);
            // type must be consistent with element type
            if (!tiResult.IsValueClass())
            {
                return typeInfo();
            }
            break;

        case CORINFO_TYPE_VAR:
            return verMakeTypeInfo(clsHnd);

        case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
        case CORINFO_TYPE_VOID:
            return typeInfo();
            break;

        case CORINFO_TYPE_BYREF:
        {
            CORINFO_CLASS_HANDLE childClassHandle;
            CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
            return ByRef(verMakeTypeInfo(childType, childClassHandle));
        }
        break;

        default:
            if (clsHnd)
            {
                // If we have more precise information, use it
                return typeInfo(TI_STRUCT, clsHnd);
            }
            else
            {
                return typeInfo(JITtype2tiType(ciType));
            }
    }
    return tiResult;
}

/******************************************************************************/

typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
{
    if (clsHnd == nullptr)
    {
        return typeInfo();
    }

    // Byrefs should only occur in method and local signatures, which are accessed
    // using ICorClassInfo and ICorClassInfo.getChildType.
    // So findClass() and getClassAttribs() should not be called for byrefs

    if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
    {
        assert(!"Did findClass() return a Byref?");
        return typeInfo();
    }

    unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);

    if (attribs & CORINFO_FLG_VALUECLASS)
    {
        CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);

        // Meta-data validation should ensure that CORINFO_TYPE_BYREF should
        // not occur here, so we may want to change this to an assert instead.
        if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
        {
            return typeInfo();
        }

#ifdef TARGET_64BIT
        if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
        {
            return typeInfo::nativeInt();
        }
#endif // TARGET_64BIT

        if (t != CORINFO_TYPE_UNDEF)
        {
            return (typeInfo(JITtype2tiType(t)));
        }
        else if (bashStructToRef)
        {
            return (typeInfo(TI_REF, clsHnd));
        }
        else
        {
            return (typeInfo(TI_STRUCT, clsHnd));
        }
    }
    else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
    {
        // See comment in _typeInfo.h for why we do it this way.
        return (typeInfo(TI_REF, clsHnd, true));
    }
    else
    {
        return (typeInfo(TI_REF, clsHnd));
    }
}

/******************************************************************************/
bool Compiler::verIsSDArray(const typeInfo& ti)
{
    if (ti.IsNullObjRef())
    { // nulls are SD arrays
        return true;
    }

    if (!ti.IsType(TI_REF))
    {
        return false;
    }

    if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
    {
        return false;
    }
    return true;
}

/******************************************************************************/
/* Given 'arrayObjectType' which is an array type, fetch the element type. */
/* Returns an error type if anything goes wrong */

typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType)
{
    assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case

    if (!verIsSDArray(arrayObjectType))
    {
        return typeInfo();
    }

    CORINFO_CLASS_HANDLE childClassHandle = nullptr;
    CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);

    return verMakeTypeInfo(ciType, childClassHandle);
}

/*****************************************************************************
 */
typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
{
    CORINFO_CLASS_HANDLE classHandle;
    CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));

    var_types type = JITtype2varType(ciType);
    if (varTypeIsGC(type))
    {
        // For efficiency, getArgType only returns something in classHandle for
        // value types.  For other types that have additional type info, you
        // have to call back explicitly
        classHandle = info.compCompHnd->getArgClass(sig, args);
        if (!classHandle)
        {
            NO_WAY("Could not figure out Class specified in argument or local signature");
        }
    }

    return verMakeTypeInfo(ciType, classHandle);
}

bool Compiler::verIsByRefLike(const typeInfo& ti)
{
    if (ti.IsByRef())
    {
        return true;
    }
    if (!ti.IsType(TI_STRUCT))
    {
        return false;
    }
    return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE;
}

bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
{
    if (ti.IsPermanentHomeByRef())
    {
        return true;
    }
    else
    {
        return false;
    }
}

bool Compiler::verIsBoxable(const typeInfo& ti)
{
    return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
            || ti.IsUnboxedGenericTypeVar() ||
            (ti.IsType(TI_STRUCT) &&
             // exclude byreflike structs
             !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE)));
}

// Is it a boxed value type?
bool Compiler::verIsBoxedValueType(const typeInfo& ti)
{
    if (ti.GetType() == TI_REF)
    {
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
        return !!eeIsValueClass(clsHnd);
    }
    else
    {
        return false;
    }
}

/*****************************************************************************
 *
 *  Check if a TailCall is legal.
 */

bool Compiler::verCheckTailCallConstraint(
    OPCODE                  opcode,
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
    bool                    speculative                // If true, won't throw if verification fails. Instead it will
                                                       // return false to the caller.
                                                       // If false, it will throw.
    )
{
    DWORD            mflags;
    CORINFO_SIG_INFO sig;
    unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
                                   // this counter is used to keep track of how many items have been
                                   // virtually popped

    CORINFO_METHOD_HANDLE methodHnd       = nullptr;
    CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
    unsigned              methodClassFlgs = 0;

    assert(impOpcodeIsCallOpcode(opcode));

    if (compIsForInlining())
    {
        return false;
    }

    // for calli, VerifyOrReturn that this is not a virtual method
    if (opcode == CEE_CALLI)
    {
        /* Get the call sig */
        eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);

        // We don't know the target method, so we have to infer the flags, or
        // assume the worst-case.
        mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
    }
    else
    {
        methodHnd = pResolvedToken->hMethod;
        mflags    = info.compCompHnd->getMethodAttribs(methodHnd);

        // When verifying generic code we pair the method handle with its
        // owning class to get the exact method signature.
        methodClassHnd = pResolvedToken->hClass;
        assert(methodClassHnd);

        eeGetMethodSig(methodHnd, &sig, methodClassHnd);

        // opcode specific check
        methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
    }

    // We must have got the methodClassHnd if opcode is not CEE_CALLI
    assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);

    if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
    }

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig.numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig.args;
    while (argCount--)
    {
        typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();

        // check that the argument is not a byref for tailcalls
        VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);

        // For unsafe code, we might have parameters containing a pointer to the stack location.
        // Disallow the tailcall for this kind.
        CORINFO_CLASS_HANDLE classHandle;
        CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
        VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);

        args = info.compCompHnd->getArgNext(args);
    }

    // update popCount
    popCount += sig.numArgs;

    // check for 'this' which is on non-static methods, not called via NEWOBJ
    if (!(mflags & CORINFO_FLG_STATIC))
    {
        // Always update the popCount.
        // This is crucial for the stack calculation to be correct.
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        if (opcode == CEE_CALLI)
        {
            // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
            // on the stack.
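            // A value class 'this' is passed by (managed) pointer, so it is modeled
            // as a byref below before running the byref-like check.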
            if (tiThis.IsValueClass())
            {
                tiThis.MakeByRef();
            }
            VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
        }
        else
        {
            // Check type compatibility of the this argument
            typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
            if (tiDeclaredThis.IsValueClass())
            {
                tiDeclaredThis.MakeByRef();
            }

            VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
        }
    }

    // Tail calls on constrained calls should be illegal too:
    // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
    VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);

    // Get the exact view of the signature for an array method
    if (sig.retType != CORINFO_TYPE_VOID)
    {
        if (methodClassFlgs & CORINFO_FLG_ARRAY)
        {
            assert(opcode != CEE_CALLI);
            eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
        }
    }

    typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
    typeInfo tiCallerRetType =
        verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);

    // A void return type gets morphed into the error type, so we have to treat it specially here
    if (sig.retType == CORINFO_TYPE_VOID)
    {
        VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
                                  speculative);
    }
    else
    {
        VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
                                                   NormaliseForStack(tiCallerRetType), true),
                                  "tailcall return mismatch", speculative);
    }

    // for tailcall, stack must be empty
    VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);

    return true; // Yes, tailcall is legal
}

/*****************************************************************************
 *
 *  Checks the IL verification rules for the call
 */

void Compiler::verVerifyCall(OPCODE                  opcode,
                             CORINFO_RESOLVED_TOKEN* pResolvedToken,
                             CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                             bool                    tailCall,
                             bool                    readonlyCall,
                             const BYTE*             delegateCreateStart,
                             const BYTE*             codeAddr,
                             CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
{
    DWORD             mflags;
    CORINFO_SIG_INFO* sig      = nullptr;
    unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
                                    // this counter is used to keep track of how many items have been
                                    // virtually popped

    // for calli, VerifyOrReturn that this is not a virtual method
    if (opcode == CEE_CALLI)
    {
        Verify(false, "Calli not verifiable");
        return;
    }

    //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
    mflags = callInfo->verMethodFlags;
    sig    = &callInfo->verSig;

    if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
    }

    // opcode specific check
    unsigned methodClassFlgs = callInfo->classFlags;
    switch (opcode)
    {
        case CEE_CALLVIRT:
            // cannot do callvirt on valuetypes
            VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
            VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
            break;

        case CEE_NEWOBJ:
        {
            assert(!tailCall); // Importer should not allow this
            VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
                           "newobj must be on instance");

            if (methodClassFlgs & CORINFO_FLG_DELEGATE)
            {
                VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
                typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
                typeInfo tiDeclaredFtn =
                    verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();

                VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");

                assert(popCount == 0);
                typeInfo tiActualObj = impStackTop(1).seTypeInfo;
                typeInfo tiActualFtn = impStackTop(0).seTypeInfo;

                VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
                VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
                VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
                               "delegate object type mismatch");

                CORINFO_CLASS_HANDLE objTypeHandle =
                    tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();

                // the method signature must be compatible with the delegate's invoke method

                // check that for virtual functions, the type of the object used to get the
                // ftn ptr is the same as the type of the object passed to the delegate ctor.
                // since this is a bit of work to determine in general, we pattern match stylized
                // code sequences

                // the delegate creation code check, which used to be done later, is now done here
                // so we can read delegateMethodRef directly from
                // the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
                // we then use it in our call to isCompatibleDelegate().
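                // In IL terms the accepted shapes are (a sketch; 'SomeDelegate' and the
                // token names are illustrative):
                //
                //     ldftn     targetMemberRef                 // non-virtual target
                //     newobj    instance void SomeDelegate::.ctor(object, native int)
                //
                //     dup
                //     ldvirtftn targetMemberRef                 // virtual target
                //     newobj    instance void SomeDelegate::.ctor(object, native int)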
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
                    Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                           "Accessing protected method through wrong type.");
                }
                goto DONE_ARGS;
            }
        }
            // fall thru to default checks
            FALLTHROUGH;
        default:
            VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
    }
    VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
                   "can only newobj a delegate constructor");

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig->numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig->args;
    while (argCount--)
    {
        typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;

        typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
        VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");

        args = info.compCompHnd->getArgNext(args);
    }

DONE_ARGS:

    // update popCount
    popCount += sig->numArgs;

    // check for 'this' which is on non-static methods, not called via NEWOBJ
    CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
    if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
    {
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis.IsType(TI_REF))
        {
            instanceClassHnd = tiThis.GetClassHandleForObjRef();
        }

        // Check type compatibility of the this argument
        typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();
        }

        // If this is a call to the base class .ctor, set thisPtr Init for
        // this block.
        if (mflags & CORINFO_FLG_CONSTRUCTOR)
        {
            if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
                verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
            {
                assert(verCurrentState.thisInitialized !=
                       TIS_Bottom); // This should never be the case just from the logic of the verifier.
                VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
                               "Call to base class constructor when 'this' is possibly initialized");
                // Otherwise, 'this' is now initialized.
                verCurrentState.thisInitialized = TIS_Init;
                tiThis.SetInitialisedObjRef();
            }
            else
            {
                // We allow direct calls to value type constructors
                // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
                // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
                VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
                               "Bad call to a constructor");
            }
        }

        if (pConstrainedResolvedToken != nullptr)
        {
            VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");

            typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);

            // We just dereference this and test for equality
            tiThis.DereferenceByRef();
            VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
                           "this type mismatch with constrained type operand");

            // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
            tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
        }

        // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
        if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
        {
            tiDeclaredThis.SetIsReadonlyByRef();
        }

        VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");

        if (tiThis.IsByRef())
        {
            // Find the actual type where the method exists (as opposed to what is declared
            // in the metadata). This is to prevent passing a byref as the "this" argument
            // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.

            CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
            VerifyOrReturn(eeIsValueClass(actualClassHnd),
                           "Call to base type of valuetype (which is never a valuetype)");
        }

        // Rules for non-virtual call to a non-final virtual method:

        // Define:
        // The "this" pointer is considered to be "possibly written" if
        //   1. Its address has been taken (LDARGA 0) anywhere in the method.
        //   (or)
        //   2. It has been stored to (STARG.0) anywhere in the method.

        // A non-virtual call to a non-final virtual method is only allowed if
        //   1. The this pointer passed to the callee is an instance of a boxed value type.
        //   (or)
        //   2. The this pointer passed to the callee is the current method's this pointer.
        //      (and) The current method's this pointer is not "possibly written".

        // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
        // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
        // This is stronger than is strictly needed, but implementing a laxer rule is significantly
        // harder and more error prone.
        if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0))
        {
            VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis),
                           "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
                           "a boxed value type.");
        }
    }

    // check any constraints on the callee's class and type parameters
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
                   "method has unsatisfied class constraints");
    VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
                   "method has unsatisfied method constraints");

    if (mflags & CORINFO_FLG_PROTECTED)
    {
        VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                       "Can't access protected method");
    }

    // Get the exact view of the signature for an array method
    if (sig->retType != CORINFO_TYPE_VOID)
    {
        eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
    }

    // "readonly." prefixed calls only allowed for the Address operation on arrays.
    // The methods supported by array types are under the control of the EE
    // so we can trust that only the Address operation returns a byref.
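    // For example (a sketch; the exact signature depends on the array type), the
    // verifiable shape is:
    //     readonly.
    //     call instance int32& int32[0..., 0...]::Address(int32, int32)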
    if (readonlyCall)
    {
        typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
        VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
                       "unexpected use of readonly prefix");
    }

    // Verify the tailcall
    if (tailCall)
    {
        verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
    }
}

/*****************************************************************************
 *  Checks that a delegate creation is done using the following pattern:
 *     dup
 *     ldvirtftn targetMemberRef
 *  OR
 *     ldftn targetMemberRef
 *
 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
 *  not in this basic block)
 *
 *  targetMemberRef is read from the code sequence.
 *  targetMemberRef is validated iff verificationNeeded.
 */

bool Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
                                        const BYTE*  codeAddr,
                                        mdMemberRef& targetMemberRef)
{
    if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
    {
        targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
        return true;
    }
    else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
    {
        targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
        return true;
    }

    return false;
}

typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
{
    Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
    typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
    typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
    if (!tiCompatibleWith(value, normPtrVal, true))
    {
        Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
    }
    return ptrVal;
}

typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
{
    assert(!instrType.IsStruct());

    typeInfo ptrVal;
    if (ptr.IsByRef())
    {
        ptrVal = DereferenceByRef(ptr);
        if (instrType.IsObjRef() && !ptrVal.IsObjRef())
        {
            Verify(false, "bad pointer");
        }
        else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
        {
            Verify(false, "pointer not consistent with instr");
        }
    }
    else
    {
        Verify(false, "pointer not byref");
    }

    return ptrVal;
}

// Verify that the field is used properly.  'tiThis' is NULL for statics,
// 'fieldFlags' is the field's attributes, and mutator is true if it is a
// ld*flda or a st*fld.
// 'enclosingClass' is given if we are accessing a field in some specific type.

void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
                              const CORINFO_FIELD_INFO& fieldInfo,
                              const typeInfo*           tiThis,
                              bool                      mutator,
                              bool                      allowPlainStructAsThis)
{
    CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
    unsigned             fieldFlags     = fieldInfo.fieldFlags;
    CORINFO_CLASS_HANDLE instanceClass =
        info.compClassHnd; // for statics, we imagine the instance is the current class.
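    // Note: an initonly field may only be stored to (or have its address taken)
    // inside a constructor of the declaring class whose staticness matches the
    // field's; that rule is enforced by the mutator check below.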
    bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
    if (mutator)
    {
        Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
        if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
        {
            Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
                       info.compIsStatic == isStaticField,
                   "bad use of initonly field (set or address taken)");
        }
    }

    if (tiThis == nullptr)
    {
        Verify(isStaticField, "used static opcode with non-static field");
    }
    else
    {
        typeInfo tThis = *tiThis;

        if (allowPlainStructAsThis && tThis.IsValueClass())
        {
            tThis.MakeByRef();
        }

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis->IsType(TI_REF))
        {
            instanceClass = tiThis->GetClassHandleForObjRef();
        }

        // Note that even if the field is static, we require that the this pointer
        // satisfy the same constraints as a non-static field.  This happens to
        // be simpler and seems reasonable
        typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();

            // we allow read-only tThis, on any field access (even stores!), because if the
            // class implementor wants to prohibit stores he should make the field private.
            // we do this by setting the read-only bit on the type we compare tThis to.
            tiDeclaredThis.SetIsReadonlyByRef();
        }
        else if (verTrackObjCtorInitState && tThis.IsThisPtr())
        {
            // Any field access is legal on "uninitialized" this pointers.
            // The easiest way to implement this is to simply set the
            // initialized bit for the duration of the type check on the
            // field access only.  It does not change the state of the "this"
            // for the function as a whole. Note that the "tThis" is a copy
            // of the original "this" type (*tiThis) passed in.
            tThis.SetInitialisedObjRef();
        }

        Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
    }

    // Presently the JIT does not check that we don't store or take the address of init-only fields
    // since we cannot guarantee their immutability and it is not a security issue.

    // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
    VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
                   "field has unsatisfied class constraints");
    if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
    {
        Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
               "Accessing protected method through wrong type.");
    }
}

void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
{
    if (tiOp1.IsNumberType())
    {
#ifdef TARGET_64BIT
        Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
#else  // TARGET_64BIT
        // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
        // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
        // but compatible, since we can coalesce native int with int32 (see section III.1.5).
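        // For instance (illustrative), comparing a 'native int' produced by conv.i
        // against an int32 is ECMA-compatible, yet fails this equivalence check.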
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
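//
//    In IL terms, the idioms recognized below are:
//      box ; unbox.any               -> NOP when the two tokens match
//      box ; brtrue/brfalse          -> constant true (plus a null check if needed)
//      box ; isinst ; brtrue/brfalse -> constant, or a load of Nullable<T>.hasValue
//      box ; isinst ; unbox.any      -> NOP when all three tokens agree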
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
                                                               1 : 0),
                                           typeInfo(TI_INT));

                            // Skip the next isinst instruction
                            return 1 + sizeof(mdToken);
                        }
                    }
                    else if (boxHelper == CORINFO_HELP_BOX_NULLABLE)
                    {
                        // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or
                        // "ldc.i4.0 + brtrue/brfalse" in case the underlying type is not castable to
                        // the target type.
                        CORINFO_RESOLVED_TOKEN isInstResolvedToken;
                        impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting);

                        CORINFO_CLASS_HANDLE nullableCls   = pResolvedToken->hClass;
                        CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls);

                        TypeCompareState castResult =
                            info.compCompHnd->compareTypesForCast(underlyingCls, isInstResolvedToken.hClass);

                        if (castResult == TypeCompareState::Must)
                        {
                            const CORINFO_FIELD_HANDLE hasValueFldHnd =
                                info.compCompHnd->getFieldInClass(nullableCls, 0);

                            assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0);
                            assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr), "hasValue"));

                            GenTree* objToBox = impPopStack().val;

                            // Spill struct to get its address (to access hasValue field)
                            objToBox = impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true);

                            impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT));

                            JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n");
                            return 1 + sizeof(mdToken);
                        }
                        else if (castResult == TypeCompareState::MustNot)
                        {
                            impPopStack();
                            impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT));

                            JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n");
                            return 1 + sizeof(mdToken);
                        }
                    }
                }
            }
            break;

            // box + isinst + unbox.any
            case CEE_UNBOX_ANY:
                if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp)
                {
                    if (makeInlineObservation)
                    {
                        compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX);
                        return 2 + sizeof(mdToken) * 2;
                    }

                    // See if the resolved tokens in box, isinst and unbox.any describe types that are equal.
                    CORINFO_RESOLVED_TOKEN isinstResolvedToken = {};
                    impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class);
                    if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass,
                                                                  pResolvedToken->hClass) == TypeCompareState::Must)
                    {
                        CORINFO_RESOLVED_TOKEN unboxResolvedToken = {};
                        impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class);

                        // If so, box + isinst + unbox.any is a nop.
                        if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass,
                                                                      pResolvedToken->hClass) ==
                            TypeCompareState::Must)
                        {
                            JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n");
                            return 2 + sizeof(mdToken) * 2;
                        }
                    }
                }
                break;
            }
        }
        break;

        default:
            break;
    }

    return -1;
}

//------------------------------------------------------------------------
// impImportAndPushBox: build and import a value-type box
//
// Arguments:
//   pResolvedToken - resolved token from the box operation
//
// Return Value:
//   None.
//
// Side Effects:
//   The value to be boxed is popped from the stack, and a tree for
//   the boxed value is pushed. This method may create upstream
//   statements, spill side effecting trees, and create new temps.
//
//   If importing an inlinee, we may also discover the inline must
//   fail. If so there is no new value pushed on the stack. Callers
//   should use CompDoNotInline after calling this method to see if
//   ongoing importation should be aborted.
//
// Notes:
//   Boxing of ref classes results in the same value as the value on
//   the top of the stack, so is handled inline in impImportBlockCode
//   for the CEE_BOX case. Only value or primitive type boxes make it
//   here.
//
//   Boxing for nullable types is done via a helper call; boxing
//   of other value types is expanded inline or handled via helper
//   call, depending on the jit's codegen mode.
//
//   When the jit is operating in size and time constrained modes,
//   using a helper call here can save jit time and code size. But it
//   also may inhibit cleanup optimizations that could have had an
//   even greater effect on code size and jit time. An optimal
//   strategy may need to peek ahead and see if it is easy to tell how
//   the box is being used. For now, we defer.
//
void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    // Spill any special side effects
    impSpillSpecialSideEff();

    // Get the expression to box from the stack.
    GenTree*             op1       = nullptr;
    GenTree*             op2       = nullptr;
    StackEntry           se        = impPopStack();
    CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
    GenTree*             exprToBox = se.val;

    // Look at what helper we should use.
    CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);

    // Determine what expansion to prefer.
    //
    // In size/time/debuggable constrained modes, the helper call
    // expansion for box is generally smaller and is preferred, unless
    // the value to box is a struct that comes from a call. In that
    // case the call can construct its return value directly into the
    // box payload, saving possibly some up-front zeroing.
    //
    // Currently primitive type boxes always get inline expanded. We may
    // want to do the same for small structs if they don't come from
    // calls and don't have GC pointers, since explicitly copying such
    // structs is cheap.
    JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
    bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
    bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled();
    bool expandInline    = canExpandInline && !optForSize;

    if (expandInline)
    {
        JITDUMP(" inline allocate/copy sequence\n");

        // we are doing 'normal' boxing.  This means that we can inline the box operation
        // Box(expr) gets morphed into
        // temp = new(clsHnd)
        // cpobj(temp+4, expr, clsHnd)
        // push temp
        // The code paths differ slightly below for structs and primitives because
        // "cpobj" differs in these cases.  In one case you get
        //    impAssignStructPtr(temp+4, expr, clsHnd)
        // and the other you get
        //    *(temp+4) = expr

        if (opts.OptimizationDisabled())
        {
            // For minopts/debug code, try and minimize the total number
            // of box temps by reusing an existing temp when possible.
            if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
            {
                impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
            }
        }
        else
        {
            // When optimizing, use a new temp for each box operation
            // since we then know the exact class of the box temp.
            impBoxTemp                       = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
            lvaTable[impBoxTemp].lvType      = TYP_REF;
            lvaTable[impBoxTemp].lvSingleDef = 1;
            JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
            const bool isExact = true;
            lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
        }

        // needs to stay in use until this box expression is appended to
        // some other node.  We approximate this by keeping it alive until
        // the opcode stack becomes empty
        impBoxTempInUse = true;

        // Remember the current last statement in case we need to move
        // a range of statements to ensure the box temp is initialized
        // before it's used.
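        // (Everything appended after 'cursor', up to and including the box-temp
        // assignment created below, is the candidate range to move; see the
        // ret-buf handling that follows.)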
        //
        Statement* const cursor = impLastStmt;

        const bool useParent = false;
        op1                  = gtNewAllocObjNode(pResolvedToken, useParent);
        if (op1 == nullptr)
        {
            // If we fail to create the newobj node, we must be inlining
            // and have run across a type we can't describe.
            //
            assert(compDonotInline());
            return;
        }

        // Remember that this basic block contains 'new' of an object,
        // and so does this method
        //
        compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
        optMethodFlags |= OMF_HAS_NEWOBJ;

        // Assign the boxed object to the box temp.
        //
        GenTree*   asg     = gtNewTempAssign(impBoxTemp, op1);
        Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        // If the exprToBox is a call that returns its value via a ret buf arg,
        // move the assignment statement(s) before the call (which must be a top level tree).
        //
        // We do this because impAssignStructPtr (invoked below) will
        // back-substitute into a call when it sees a GT_RET_EXPR and the call
        // has a hidden buffer pointer, so we need to reorder things to avoid
        // creating out-of-sequence IR.
        //
        if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR))
        {
            GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall();

            if (call->HasRetBufArg())
            {
                JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call));

                // Walk back through the statements in this block, looking for the one
                // that has this call as the root node.
                //
                // Because gtNewTempAssign (above) may have added statements that
                // feed into the actual assignment we need to move this set of added
                // statements as a group.
                //
                // Note boxed allocations are side-effect free (no com or finalizer) so
                // our only worries here are (correctness) not overlapping the box temp
                // lifetime and (perf) stretching the temp lifetime across the inlinee
                // body.
                //
                // Since this is an inline candidate, we must be optimizing, and so we have
                // a unique box temp per call. So no worries about overlap.
                //
                assert(!opts.OptimizationDisabled());

                // Lifetime stretching could be addressed with some extra cleverness--sinking
                // the allocation back down to just before the copy, once we figure out
                // where the copy is. We defer for now.
                //
                Statement* insertBeforeStmt = cursor;
                noway_assert(insertBeforeStmt != nullptr);
                while (true)
                {
                    if (insertBeforeStmt->GetRootNode() == call)
                    {
                        break;
                    }

                    // If we've searched all the statements in the block and failed to
                    // find the call, then something's wrong.
                    //
                    noway_assert(insertBeforeStmt != impStmtList);

                    insertBeforeStmt = insertBeforeStmt->GetPrevStmt();
                }

                // Found the call. Move the statements comprising the assignment.
                //
                JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(),
                        asgStmt->GetID(), insertBeforeStmt->GetID());
                assert(asgStmt == impLastStmt);
                do
                {
                    Statement* movingStmt = impExtractLastStmt();
                    impInsertStmtBefore(movingStmt, insertBeforeStmt);
                    insertBeforeStmt = movingStmt;
                } while (impLastStmt != cursor);
            }
        }

        // Create a pointer to the box payload in op1.
        //
        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
        op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
        op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);

        // Copy from the exprToBox to the box payload.
        //
        if (varTypeIsStruct(exprToBox))
        {
            assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
            op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
        }
        else
        {
            var_types lclTyp = exprToBox->TypeGet();
            if (lclTyp == TYP_BYREF)
            {
                lclTyp = TYP_I_IMPL;
            }
            CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
            if (impIsPrimitive(jitType))
            {
                lclTyp = JITtype2varType(jitType);
            }

            var_types srcTyp = exprToBox->TypeGet();
            var_types dstTyp = lclTyp;

            // We allow float <-> double mismatches and implicit truncation for small types.
            assert((genActualType(srcTyp) == genActualType(dstTyp)) ||
                   (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp)));

            // Note regarding small types.
            // We are going to store to the box here via an indirection, so the cast added below is
            // redundant, since the store has an implicit truncation semantic. The reason we still
            // add this cast is so that the code which deals with GT_BOX optimizations does not have
            // to account for this implicit truncation (e.g. understand that BOX<byte>(0xFF + 1) is
            // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities).
            if (srcTyp != dstTyp)
            {
                exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp);
            }

            op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox);
        }

        // Spill eval stack to flush out any pending side effects.
        impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));

        // Set up this copy as a second assignment.
        Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

        op1 = gtNewLclvNode(impBoxTemp, TYP_REF);

        // Record that this is a "box" node and keep track of the matching parts.
        op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);

        // If it is a value class, mark the "box" node.  We can use this information
        // to optimise several cases:
        //    "box(x) == null" --> false
        //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
        //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"

        op1->gtFlags |= GTF_BOX_VALUE;
        assert(op1->IsBoxedValue());
        assert(asg->gtOper == GT_ASG);
    }
    else
    {
        // Don't optimize, just call the helper and be done with it.
        JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
        assert(operCls != nullptr);

        // Ensure that the value class is restored
        op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */);
        if (op2 == nullptr)
        {
            // We must be backing out of an inline.
            assert(compDonotInline());
            return;
        }

        GenTreeCall::Use* args =
            gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
        op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
    }

    /* Push the result back on the stack, */
    /* even if clsHnd is a value class we want the TI_REF */
    typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
    impPushOnStack(op1, tiRetVal);
}

//------------------------------------------------------------------------
// impImportNewObjArray: Build and import `new` of multi-dimensional array
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//    pCallInfo - The CORINFO_CALL_INFO that has been initialized
//                by a call to CEEInfo::getCallInfo().
//
// Assumptions:
//    The multi-dimensional array constructor arguments (array dimensions) are
//    pushed on the IL stack on entry to this method.
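//
//    For example (a sketch), 'new int[2,3]' ends up roughly as:
//      lvaNewObjArrayArgs[0] = 2; lvaNewObjArrayArgs[1] = 3;
//      CORINFO_HELP_NEW_MDARR(handleof(int[,]), 2, &lvaNewObjArrayArgs)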
//
// Notes:
//    Multi-dimensional array constructors are imported as calls to a JIT
//    helper, not as regular calls.

void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
    if (classHandle == nullptr)
    { // compDonotInline()
        return;
    }

    assert(pCallInfo->sig.numArgs);

    GenTree* node;

    // Reuse the temp used to pass the array dimensions to avoid bloating
    // the stack frame in case there are multiple calls to multi-dim array
    // constructors within a single method.
    if (lvaNewObjArrayArgs == BAD_VAR_NUM)
    {
        lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
        lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
        lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
    }

    // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
    // for our call to CORINFO_HELP_NEW_MDARR.
    lvaTable[lvaNewObjArrayArgs].lvExactSize =
        max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));

    // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
    // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
    // to one allocation at a time.
    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));

    //
    // The arguments of the CORINFO_HELP_NEW_MDARR helper are:
    //  - Array class handle
    //  - Number of dimension arguments
    //  - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
    //

    node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
    node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);

    // Pop dimension arguments from the stack one at a time and store them
    // into lvaNewObjArrayArgs temp.
    for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
    {
        GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);

        GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
        dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
        dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
                              new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
        dest          = gtNewOperNode(GT_IND, TYP_INT, dest);

        node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
    }

    GenTreeCall::Use* args = gtNewCallArgs(node);

    // pass number of arguments to the helper
    args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args);

    args = gtPrependNewCallArg(classHandle, args);

    node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);

    for (GenTreeCall::Use& use : node->AsCall()->Args())
    {
        node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
    }

    node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;

    // Remember that this basic block contains 'new' of a md array
    compCurBB->bbFlags |= BBF_HAS_NEWARRAY;

    impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}

GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
                                    CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                    CORINFO_THIS_TRANSFORM  transform)
{
    switch (transform)
    {
        case CORINFO_DEREF_THIS:
        {
            GenTree* obj = thisPtr;

            // This does a LDIND on the obj, which should be a byref pointing to a ref.
pointing to a ref impBashVarAddrsToI(obj); assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF); CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj); // ldind could point anywhere, example a boxed class static int obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); return obj; } case CORINFO_BOX_THIS: { // Constraint calls where there might be no // unboxed entry point require us to implement the call via helper. // These only occur when a possible target of the call // may have inherited an implementation of an interface // method from System.Object or System.ValueType. The EE does not provide us with // "unboxed" versions of these methods. GenTree* obj = thisPtr; assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL); obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj); obj->gtFlags |= GTF_EXCEPT; CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); if (impIsPrimitive(jitTyp)) { if (obj->OperIsBlk()) { obj->ChangeOperUnchecked(GT_IND); // Obj could point anywhere, example a boxed class static int obj->gtFlags |= GTF_IND_TGTANYWHERE; obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers } obj->gtType = JITtype2varType(jitTyp); assert(varTypeIsArithmetic(obj->gtType)); } // This pushes on the dereferenced byref // This is then used immediately to box. impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack()); // This pops off the byref-to-a-value-type remaining on the stack and // replaces it with a boxed object. // This is then used as the object to the virtual call immediately below. impImportAndPushBox(pConstrainedResolvedToken); if (compDonotInline()) { return nullptr; } obj = impPopStack().val; return obj; } case CORINFO_NO_THIS_TRANSFORM: default: return thisPtr; } } //------------------------------------------------------------------------ // impCanPInvokeInline: check whether PInvoke inlining should enabled in current method. // // Return Value: // true if PInvoke inlining should be enabled in current method, false otherwise // // Notes: // Checks a number of ambient conditions where we could pinvoke but choose not to bool Compiler::impCanPInvokeInline() { return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke ; } //------------------------------------------------------------------------ // impCanPInvokeInlineCallSite: basic legality checks using information // from a call to see if the call qualifies as an inline pinvoke. // // Arguments: // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Return Value: // true if this call can legally qualify as an inline pinvoke, false otherwise // // Notes: // For runtimes that support exception handling interop there are // restrictions on using inline pinvoke in handler regions. // // * We have to disable pinvoke inlining inside of filters because // in case the main execution (i.e. 
in the try block) is inside
//   unmanaged code, we cannot reuse the inlined stub (we still need
//   the original state until we are in the catch handler)
//
// * We disable pinvoke inlining inside handlers since the GSCookie
//   is in the inlined Frame (see
//   CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
//   this would not protect framelets/return-address of handlers.
//
// These restrictions are currently also in place for CoreCLR but
// can be relaxed when coreclr/#8459 is addressed.
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
    if (block->hasHndIndex())
    {
        return false;
    }

    // The remaining limitations do not apply to CoreRT
    if (IsTargetAbi(CORINFO_CORERT_ABI))
    {
        return true;
    }

#ifdef TARGET_64BIT
    // On 64-bit platforms, we disable pinvoke inlining inside of try regions.
    // Note that this could be needed on other architectures too, but we
    // haven't done enough investigation to know for sure at this point.
    //
    // Here is the comment from JIT64 explaining why:
    //   [VSWhidbey: 611015] - because the jitted code links in the
    //   Frame (instead of the stub) we rely on the Frame not being
    //   'active' until inside the stub. This normally happens by the
    //   stub setting the return address pointer in the Frame object
    //   inside the stub. On a normal return, the return address
    //   pointer is zeroed out so the Frame can be safely re-used, but
    //   if an exception occurs, nobody zeros out the return address
    //   pointer. Thus if we re-used the Frame object, it would go
    //   'active' as soon as we link it into the Frame chain.
    //
    //   Technically we only need to disable PInvoke inlining if we're
    //   in a handler or if we're in a try body with a catch or
    //   filter/except where other non-handler code in this method
    //   might run and try to re-use the dirty Frame object.
    //
    //   A desktop test case where this seems to matter is
    //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
    if (block->hasTryIndex())
    {
        // This does not apply to the raw pinvoke call that is inside the pinvoke
        // ILStub. In this case, we have to inline the raw pinvoke call into the stub,
        // otherwise we would end up with a stub that recursively calls itself, and end
        // up with a stack overflow.
        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
        {
            return true;
        }

        return false;
    }
#endif // TARGET_64BIT

    return true;
}

//------------------------------------------------------------------------
// impCheckForPInvokeCall: examine a call to see if it is a pinvoke and,
//    if so, whether it can be expressed as an inline pinvoke.
//
// Arguments:
//    call - tree for the call
//    methHnd - handle for the method being called (may be null)
//    sig - signature of the method being called
//    mflags - method flags for the method being called
//    block - block containing the call, or for inlinees, block
//            containing the call being inlined
//
// Notes:
//   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
//
//   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
//   call passes a combination of legality and profitability checks.
// // If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition void Compiler::impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block) { CorInfoCallConvExtension unmanagedCallConv; // If VM flagged it as Pinvoke, flag the call node accordingly if ((mflags & CORINFO_FLG_PINVOKE) != 0) { call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE; } bool suppressGCTransition = false; if (methHnd) { if ((mflags & CORINFO_FLG_PINVOKE) == 0) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition); } else { if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition); assert(!call->gtCallCookie); } if (suppressGCTransition) { call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION; } // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT, // return here without inlining the native call. if (unmanagedCallConv == CorInfoCallConvExtension::Managed || unmanagedCallConv == CorInfoCallConvExtension::Fastcall || unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction) { return; } optNativeCallCount++; if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI))) { // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been // converted to regular method calls earlier using convertPInvokeCalliToCall. // PInvoke CALLI in IL stubs must be inlined } else { // Check legality if (!impCanPInvokeInlineCallSite(block)) { return; } // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive // inlining in CoreRT. Skip the ambient conditions checks and profitability checks. if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite // recursive calls to the stub. } else { if (!impCanPInvokeInline()) { return; } // Size-speed tradeoff: don't use inline pinvoke at rarely // executed call sites. The non-inline version is more // compact. if (block->isRunRarely()) { return; } } } // The expensive check should be last if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig)) { return; } } JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName)); call->gtFlags |= GTF_CALL_UNMANAGED; call->unmgdCallConv = unmanagedCallConv; if (!call->IsSuppressGCTransition()) { info.compUnmanagedCallCountWithGCTransition++; } // AMD64 convention is same for native and managed if (unmanagedCallConv == CorInfoCallConvExtension::C || unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction) { call->gtFlags |= GTF_CALL_POP_ARGS; } if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall) { call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL; } } GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di) { var_types callRetTyp = JITtype2varType(sig->retType); /* The function pointer is on top of the stack - It may be a * complex expression. As it is evaluated after the args, * it may cause registered args to be spilled. Simply spill it. */ // Ignore this trivial case. 
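// A bare GT_LCL_VAR read has no side effects and cannot be invalidated by
// evaluating the arguments, so only more complex target expressions need the
// spill below.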
if (impStackTop().val->gtOper != GT_LCL_VAR) { impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ GenTree* fptr = impPopStack().val; // The function pointer is typically a sized to match the target pointer size // However, stubgen IL optimization can change LDC.I8 to LDC.I4 // See ILCodeStream::LowerOpcode assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT); #ifdef DEBUG // This temporary must never be converted to a double in stress mode, // because that can introduce a call to the cast helper after the // arguments have already been evaluated. if (fptr->OperGet() == GT_LCL_VAR) { lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1; } #endif /* Create the call node */ GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); #ifdef UNIX_X86_ABI call->gtFlags &= ~GTF_CALL_POP_ARGS; #endif return call; } /*****************************************************************************/ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) { assert(call->gtFlags & GTF_CALL_UNMANAGED); /* Since we push the arguments in reverse order (i.e. right -> left) * spill any side effects from the stack * * OBS: If there is only one side effect we do not need to spill it * thus we have to spill all side-effects except last one */ unsigned lastLevelWithSideEffects = UINT_MAX; unsigned argsToReverse = sig->numArgs; // For "thiscall", the first argument goes in a register. Since its // order does not need to be changed, we do not need to spill it if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { assert(argsToReverse); argsToReverse--; } #ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) { if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { /* We had a previous side effect - must spill it */ impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect")); /* Record the level for the current side effect in case we will spill it */ lastLevelWithSideEffects = level; } else { /* This is the first side effect encountered - record its level */ lastLevelWithSideEffects = level; } } } /* The argument list is now "clean" - no out-of-order side effects * Pop the argument list in reverse order */ GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse); call->AsCall()->gtCallArgs = args; if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { GenTree* thisPtr = args->GetNode(); impBashVarAddrsToI(thisPtr); assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF); } for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args)) { GenTree* arg = argUse.GetNode(); call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT; // We should not be passing gc typed args to an unmanaged call. 
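// Sketch of the situation handled below (hypothetical C# signature, for
// illustration only):
//     [DllImport("native")] static extern void Poke(ref int p);
// The 'ref int' argument arrives here typed as TYP_BYREF and is retyped to
// native int so the GC info at the call site stays consistent.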
if (varTypeIsGC(arg->TypeGet())) { // Tolerate byrefs by retyping to native int. // // This is needed or we'll generate inconsistent GC info // for this arg at the call site (gc info says byref, // pinvoke sig says native int). // if (arg->TypeGet() == TYP_BYREF) { arg->ChangeType(TYP_I_IMPL); } else { assert(!"*** invalid IL: gc ref passed to unmanaged call"); } } } } //------------------------------------------------------------------------ // impInitClass: Build a node to initialize the class before accessing the // field if necessary // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // // Return Value: If needed, a pointer to the node that will perform the class // initializtion. Otherwise, nullptr. // GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle); if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0) { return nullptr; } bool runtimeLookup; GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup); if (node == nullptr) { assert(compDonotInline()); return nullptr; } if (runtimeLookup) { node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node)); } else { // Call the shared non gc static helper, as its the fastest node = fgGetSharedCCtor(pResolvedToken->hClass); } return node; } GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp) { GenTree* op1 = nullptr; #if defined(DEBUG) // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32), // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change. // Instead, simply fix up the data here for future use. // This variable should be the largest size element, with the largest alignment requirement, // and the native C++ compiler should guarantee sufficient alignment. double aligned_data = 0.0; void* p_aligned_data = &aligned_data; if (info.compMethodSuperPMIIndex != -1) { switch (lclTyp) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char)); // No alignment necessary for byte. 
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
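// Layout note: a "boxed" static lives on the GC heap as an object, i.e.
// [method table pointer][payload], which is why the boxed paths below add
// TARGET_POINTER_SIZE to the object address to reach the payload.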
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
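// Import-only (verification) compiles surface the failure immediately; a
// normal compile instead inserts a throwing helper call so the exception is
// raised if the code path actually executes.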
if (compIsForImportOnly())
            {
                info.compCompHnd->ThrowExceptionForHelper(helperCall);
            }
            else
            {
                impInsertHelperCall(helperCall);
            }
            break;
    }
}

void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
{
    // Construct the argument list
    GenTreeCall::Use* args = nullptr;
    assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
    for (unsigned i = helperInfo->numArgs; i > 0; --i)
    {
        const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
        GenTree*                  currentArg = nullptr;
        switch (helperArg.argType)
        {
            case CORINFO_HELPER_ARG_TYPE_Field:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass(helperArg.fieldHandle));
                currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Method:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
                currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Class:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
                currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Module:
                currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
                break;
            case CORINFO_HELPER_ARG_TYPE_Const:
                currentArg = gtNewIconNode(helperArg.constant);
                break;
            default:
                NO_WAY("Illegal helper arg type");
        }
        args = gtPrependNewCallArg(currentArg, args);
    }

    /* TODO-Review:
     * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
     * Also, consider sticking this in the first basic block.
     */
    GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
    impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
}

//------------------------------------------------------------------------
// impTailCallRetTypeCompatible: Checks whether the return types of the caller
//    and the callee are compatible, so that the callee can be tail called.
//    Mismatched integral return type sizes are tolerated only when the value
//    is returned in the same register and the caller needs no normalization.
//
// Arguments:
//     allowWidening -- whether to allow implicit widening by the callee.
//                      For instance, allowing int32 -> int16 tailcalls.
//                      The managed calling convention allows this, but
//                      we don't want explicit tailcalls to depend on this
//                      detail of the managed calling convention.
//     callerRetType -- the caller's return type
//     callerRetTypeClass - the caller's return struct type
//     callerCallConv -- calling convention of the caller
//     calleeRetType -- the callee's return type
//     calleeRetTypeClass - the callee return struct type
//     calleeCallConv -- calling convention of the callee
//
// Returns:
//     True if the tailcall types are compatible.
//
// Remarks:
//     Note that here we don't check compatibility in IL Verifier sense, but on the
//     lines of return types getting returned in the same return register.
bool Compiler::impTailCallRetTypeCompatible(bool                     allowWidening,
                                            var_types                callerRetType,
                                            CORINFO_CLASS_HANDLE     callerRetTypeClass,
                                            CorInfoCallConvExtension callerCallConv,
                                            var_types                calleeRetType,
                                            CORINFO_CLASS_HANDLE     calleeRetTypeClass,
                                            CorInfoCallConvExtension calleeCallConv)
{
    // Early out if the types are the same.
    if (callerRetType == calleeRetType)
    {
        return true;
    }

    // For integral types the managed calling convention dictates that callee
    // will widen the return value to 4 bytes, so we can allow implicit widening
    // in managed to managed tailcalls when dealing with <= 4 bytes.
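    // Illustrative C# (not from this file): with allowWidening, a caller
    // declared as "int Caller()" may implicitly tail call "short Callee()",
    // because the managed convention has the callee widen the 2-byte result
    // to 4 bytes.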
bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) &&
                     (calleeCallConv == CorInfoCallConvExtension::Managed);

    if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) &&
        (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType)))
    {
        return true;
    }

    // If the class handles are the same and not null, the return types are compatible.
    if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
    {
        return true;
    }

#if defined(TARGET_AMD64) || defined(TARGET_ARMARCH)
    // Jit64 compat:
    if (callerRetType == TYP_VOID)
    {
        // This needs to be allowed to support the following IL pattern that Jit64 allows:
        //     tail.call
        //     pop
        //     ret
        //
        // Note that the above IL pattern is not valid as per IL verification rules.
        // Therefore, only full trust code can take advantage of this pattern.
        return true;
    }

    // These checks return true if the return value type sizes are the same and
    // get returned in the same return register i.e. caller doesn't need to normalize
    // return value. Some of the tail calls permitted by below checks would have
    // been rejected by IL Verifier before we reached here. Therefore, only full
    // trust code can make those tail calls.
    unsigned callerRetTypeSize = 0;
    unsigned calleeRetTypeSize = 0;
    bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
                                                               true, info.compIsVarArgs, callerCallConv);
    bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
                                                               true, info.compIsVarArgs, calleeCallConv);

    if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
    {
        return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
    }
#endif // TARGET_AMD64 || TARGET_ARMARCH

    return false;
}

/********************************************************************************
 *
 * Returns true if the current opcode and the opcodes following it correspond
 * to a supported tail call IL pattern.
 *
 */
bool Compiler::impIsTailCallILPattern(
    bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
    // Bail out if the current opcode is not a call.
    if (!impOpcodeIsCallOpcode(curOpcode))
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // If shared ret tail opt is not enabled, we will enable
    // it for recursive methods.
    if (isRecursive)
#endif
    {
        // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
        // part of the sequence. Make sure we don't go past the end of the IL however.
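        // (Best-effort reading: extending the window by one byte lets the
        // CEE_RET that terminates such a fallthrough block be observed, while
        // the min() clamp keeps the scan inside the method's IL.)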
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
    }

    // Bail out if there is no next opcode after call
    if (codeAddrOfNextOpcode >= codeEnd)
    {
        return false;
    }

    OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);

    return (nextOpcode == CEE_RET);
}

/*****************************************************************************
 *
 * Determine whether the call could be converted to an implicit tail call
 *
 */
bool Compiler::impIsImplicitTailCallCandidate(
    OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
{
#if FEATURE_TAILCALL_OPT
    if (!opts.compTailCallOpt)
    {
        return false;
    }

    if (opts.OptimizationDisabled())
    {
        return false;
    }

    // must not be tail prefixed
    if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // the block containing call is marked as BBJ_RETURN
    // We allow shared ret tail call optimization on recursive calls even under
    // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
    if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
        return false;
#endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN

    // must be call+ret or call+pop+ret
    if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
    {
        return false;
    }

    return true;
#else
    return false;
#endif // FEATURE_TAILCALL_OPT
}

//------------------------------------------------------------------------
// impImportCall: import a call-inspiring opcode
//
// Arguments:
//    opcode                    - opcode that inspires the call
//    pResolvedToken            - resolved token for the call target
//    pConstrainedResolvedToken - resolved constraint token (or nullptr)
//    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
//    prefixFlags               - IL prefix flags for the call
//    callInfo                  - EE supplied info for the call
//    rawILOffset               - IL offset of the opcode, used for guarded devirtualization.
//
// Returns:
//    Type of the call's return value.
//    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
//    However we can't assert for this here yet because there are cases we miss. See issue #13272.
//
// Notes:
//    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
//
//    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
//    uninitialized object.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
var_types Compiler::impImportCall(OPCODE                  opcode,
                                  CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                  CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                  GenTree*                newobjThis,
                                  int                     prefixFlags,
                                  CORINFO_CALL_INFO*      callInfo,
                                  IL_OFFSET               rawILOffset)
{
    assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);

    // The current statement DI may not refer to the exact call, but for calls
    // we wish to be able to attach the exact IL instruction to get "return
    // value" support in the debugger, so create one with the exact IL offset.
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_INTRINSIC)) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (supportSIMDTypes()) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it. 
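                    // Spill-to-temp pattern: park the stub address in a fresh
                    // local and call through that local, so evaluating the
                    // call's arguments cannot reorder with (or clobber) the
                    // computed address.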
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorpgCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, We've been told to call via LDVIRTFTN, so just // take the call now.... 
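                // Rough shape of the expansion built below (pseudocode, for
                // illustration only):
                //     t    = <this>                 // cloned so it can be used twice
                //     fptr = ldvirtftn t, method    // impImportLdvirtftn
                //     calli fptr(t, args)           // indirect call created below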
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig); GenTree* thisPtr = impPopStack().val; thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform); assert(thisPtr != nullptr); // Clone the (possibly transformed) "this" pointer GenTree* thisPtrCopy; thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("LDVIRTFTN this pointer")); GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo); assert(fptr != nullptr); thisPtr = nullptr; // can't reuse it // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node call = gtNewIndCallNode(fptr, callRetTyp, args, di); call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { // CoreRT generic virtual method: need to handle potential fat function pointers addFatPointerCandidate(call->AsCall()); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is needed for ready to run to handle // non-virtual <-> virtual changes between versions call->gtFlags |= GTF_CALL_NULLCHECK; } #endif // Sine we are jumping over some code, check that its OK to skip that code assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); goto DONE; } case CORINFO_CALL: { // This is for a non-virtual, non-interface etc. call call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); // We remove the nullcheck for the GetType call intrinsic. // TODO-CQ: JIT64 does not introduce the null check for many more helper calls // and intrinsics. if (callInfo->nullInstanceCheck && !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType))) { call->gtFlags |= GTF_CALL_NULLCHECK; } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup); } #endif break; } case CORINFO_CALL_CODE_POINTER: { // The EE has asked us to call by computing a code pointer and then doing an // indirect call. This is because a runtime lookup is required to get the code entry point. // These calls always follow a uniform calling convention, i.e. 
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
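           For example (hypothetical): if a two-arg caller tail called a
           three-arg varargs callee, the caller would pop a different number
           of bytes than were pushed, corrupting the stack.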
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types of the // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fallback to JIT. 
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
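    // Illustrative C# for case (d) above (not from this file): calling a
    // shared generic method such as "static T Identity<T>(T x)" as
    // Identity<string>(s) passes the exact MethodDesc for Identity<string>
    // as the hidden extra argument.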
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
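        // The verifiable IL shapes referred to above look like (illustration):
        //     ldftn      void C::M()        // or: dup; ldvirtftn instance void C::M()
        //     newobj     instance void D::.ctor(object, native int)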
if (impStackHeight() > 0)
{
    typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
    if (delegateTypeInfo.IsToken())
    {
        ldftnToken = delegateTypeInfo.GetToken();
    }
}
}

//-------------------------------------------------------------------------
// The main group of arguments

args                       = impPopCallArgs(sig->numArgs, sig, extraArg);
call->AsCall()->gtCallArgs = args;

for (GenTreeCall::Use& use : call->AsCall()->Args())
{
    call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT;
}

//-------------------------------------------------------------------------
// The "this" pointer

if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) &&
    !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
{
    GenTree* obj;

    if (opcode == CEE_NEWOBJ)
    {
        obj = newobjThis;
    }
    else
    {
        obj = impPopStack().val;
        obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
        if (compDonotInline())
        {
            return TYP_UNDEF;
        }
    }

    // Store the "this" value in the call
    call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
    call->AsCall()->gtCallThisArg = gtNewCallArgs(obj);

    // Is this a virtual or interface call?
    if (call->AsCall()->IsVirtual())
    {
        // only true object pointers can be virtual
        assert(obj->gtType == TYP_REF);

        // See if we can devirtualize.

        const bool isExplicitTailCall     = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
        const bool isLateDevirtualization = false;
        impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags,
                            &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall,
                            // Take care to pass raw IL offset here as the 'debug info' might be different for
                            // inlinees.
                            rawILOffset);

        // Devirtualization may change which method gets invoked. Update our local cache.
        //
        methHnd = callInfo->hMethod;
    }

    if (impIsThis(obj))
    {
        call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
    }
}

//-------------------------------------------------------------------------
// The "this" pointer for "newobj"

if (opcode == CEE_NEWOBJ)
{
    if (clsFlags & CORINFO_FLG_VAROBJSIZE)
    {
        assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately

        // This is a 'new' of a variable sized object, where
        // the constructor is to return the object. In this case
        // the constructor claims to return VOID but we know it
        // actually returns the new object
        assert(callRetTyp == TYP_VOID);
        callRetTyp   = TYP_REF;
        call->gtType = TYP_REF;
        impSpillSpecialSideEff();

        impPushOnStack(call, typeInfo(TI_REF, clsHnd));
    }
    else
    {
        if (clsFlags & CORINFO_FLG_DELEGATE)
        {
            // The new inliner morphs this in impImportCall.
            // This will allow us to inline the call to the delegate constructor.
            call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
        }

        if (!bIntrinsicImported)
        {
#if defined(DEBUG) || defined(INLINE_DATA)

            // Keep track of the raw IL offset of the call
            call->AsCall()->gtRawILOffset = rawILOffset;

#endif // defined(DEBUG) || defined(INLINE_DATA)

            // Is it an inline candidate?
            impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
        }

        // append the call node.
        impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);

        // Now push the value of the 'new' onto the stack

        // This is a 'new' of a non-variable sized object.
        // Append the new node (op1) to the statement list,
        // and then push the local holding the value of this
        // new instruction on the stack.
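// For a value class, the "this" we passed to the constructor was the address
// of a local, so push that local; for a reference type, push the local that
// holds the newly allocated object.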
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
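// (For a true virtual call the exact callee is not known at jit time, and for
// an indirect call there is no method handle at all, so we pass nullptr and
// let the VM reason conservatively about the callee.)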
CORINFO_METHOD_HANDLE exactCalleeHnd =
    ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd;

if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall))
{
    if (isExplicitTailCall)
    {
        // In case of explicit tail calls, mark it so that it is not considered
        // for in-lining.
        call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
        JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));

        if (isStressTailCall)
        {
            call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL;
            JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call));
        }
    }
    else
    {
#if FEATURE_TAILCALL_OPT
        // Must be an implicit tail call.
        assert(isImplicitTailCall);

        // It is possible that a call node is both an inline candidate and marked
        // for opportunistic tail calling. Inlining happens before morphing of
        // trees. If inlining of an inline candidate gets aborted for whatever
        // reason, it will survive to the morphing stage at which point it will be
        // transformed into a tail call after performing additional checks.

        call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
        JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call));

#else //! FEATURE_TAILCALL_OPT
        NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
#endif // FEATURE_TAILCALL_OPT
    }

    // This might or might not turn into a tailcall. We do more
    // checks in morph. For explicit tailcalls we need more
    // information in morph in case it turns out to be a
    // helper-based tailcall.
    if (isExplicitTailCall)
    {
        assert(call->AsCall()->tailCallInfo == nullptr);
        call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo;
        switch (opcode)
        {
            case CEE_CALLI:
                call->AsCall()->tailCallInfo->SetCalli(sig);
                break;
            case CEE_CALLVIRT:
                call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken);
                break;
            default:
                call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken);
                break;
        }
    }
}
else
{
    // canTailCall reported its reasons already
    canTailCall = false;
    JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call));
}
}
else
{
    // If this assert fires it means that canTailCall was set to false without setting a reason!
    assert(szCanTailCallFailReason != nullptr);
    JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im",
            dspTreeID(call), szCanTailCallFailReason);
    info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL,
                                             szCanTailCallFailReason);
}
}

// Note: we assume that small return types are already normalized by the managed callee
// or by the pinvoke stub for calls to unmanaged code.

if (!bIntrinsicImported)
{
    //
    // Things needed to be checked when bIntrinsicImported is false.
    //

    assert(call->gtOper == GT_CALL);
    assert(callInfo != nullptr);

    if (compIsForInlining() && opcode == CEE_CALLVIRT)
    {
        GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode();

        if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
            impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj,
                                                               impInlineInfo->inlArgInfo))
        {
            impInlineInfo->thisDereferencedFirst = true;
        }
    }

#if defined(DEBUG) || defined(INLINE_DATA)

    // Keep track of the raw IL offset of the call
    call->AsCall()->gtRawILOffset = rawILOffset;

#endif // defined(DEBUG) || defined(INLINE_DATA)

    // Is it an inline candidate?
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
}

// Extra checks for tail calls and tail recursion.
//
// A tail recursive call is a potential loop from the current block to the start of the root method.
// If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially
// being in a loop.
//
// Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too
// late. Currently this doesn't lead to problems. See GitHub issue 33529.
//
// OSR also needs to handle tail calls specially:
// * block profiling in OSR methods needs to ensure probes happen before tail calls, not after.
// * the root method entry must be imported if there's a recursive tail call or a potentially
//   inlineable tail call.
//
if ((tailCallFlags != 0) && canTailCall)
{
    if (gtIsRecursiveCall(methHnd))
    {
        assert(verCurrentState.esStackDepth == 0);
        BasicBlock* loopHead = nullptr;
        if (!compIsForInlining() && opts.IsOSR())
        {
            // For root method OSR we may branch back to the actual method entry,
            // which is not fgFirstBB, and which we will need to import.
            assert(fgEntryBB != nullptr);
            loopHead = fgEntryBB;
        }
        else
        {
            // For normal jitting we may branch back to the firstBB; this
            // should already be imported.
            loopHead = fgFirstBB;
        }

        JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB
                " as having a backward branch.\n",
                dspTreeID(call), loopHead->bbNum, compCurBB->bbNum);
        fgMarkBackwardJump(loopHead, compCurBB);
    }

    // We only do these OSR checks in the root method because:
    // * If we fail to import the root method entry when importing the root method, we can't go back
    //   and import it during inlining. So instead of checking just for recursive tail calls we also
    //   have to check for anything that might introduce a recursive tail call.
    // * We only instrument root method blocks in OSR methods.
    //
    if (opts.IsOSR() && !compIsForInlining())
    {
        // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique
        // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile
        // instrumentation.
        //
        if (compCurBB->bbJumpKind != BBJ_RETURN)
        {
            BasicBlock* const successor = compCurBB->GetUniqueSucc();
            assert(successor->bbJumpKind == BBJ_RETURN);
            successor->bbFlags |= BBF_TAILCALL_SUCCESSOR;
            optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR;
        }

        // If this call might eventually turn into a loop back to method entry, make sure we
        // import the method entry.
        //
        assert(call->IsCall());
        GenTreeCall* const actualCall           = call->AsCall();
        const bool         mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() ||
                                          actualCall->IsGuardedDevirtualizationCandidate();

        // Only schedule importation if we're not currently importing.
        //
        if (mustImportEntryBlock && (compCurBB != fgEntryBB))
        {
            JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB
                    " for importation\n",
                    dspTreeID(call), fgEntryBB->bbNum);
            impImportBlockPending(fgEntryBB);
        }
    }
}

if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0)
{
    assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER);
    addFatPointerCandidate(call->AsCall());
}

DONE_CALL:
// Push or append the result of the call
if (callRetTyp == TYP_VOID)
{
    if (opcode == CEE_NEWOBJ)
    {
        // we actually did push something, so don't spill the thing we just pushed.
assert(verCurrentState.esStackDepth > 0);
impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI);
}
else
{
    impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI);
}
}
else
{
    impSpillSpecialSideEff();

    if (clsFlags & CORINFO_FLG_ARRAY)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
    }

    typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
    tiRetVal.NormaliseForStack();

    // The CEE_READONLY prefix modifies the verification semantics of an Address
    // operation on an array type.
    if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef())
    {
        tiRetVal.SetIsReadonlyByRef();
    }

    if (call->IsCall())
    {
        // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)

        GenTreeCall* origCall = call->AsCall();

        const bool isFatPointerCandidate              = origCall->IsFatPointerCandidate();
        const bool isInlineCandidate                  = origCall->IsInlineCandidate();
        const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate();

        if (varTypeIsStruct(callRetTyp))
        {
            // Need to treat all "split tree" cases here, not just inline candidates
            call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
        }

        // TODO: consider handling fatcalli cases this way too...?
        if (isInlineCandidate || isGuardedDevirtualizationCandidate)
        {
            // We should not have made any adjustments in impFixupCallStructReturn
            // as we defer those until we know the fate of the call.
            assert(call == origCall);

            assert(opts.OptEnabled(CLFLG_INLINING));
            assert(!isFatPointerCandidate); // We should not try to inline calli.

            // Make the call its own tree (spill the stack if needed).
            // Do not consume the debug info here. This is particularly
            // important if we give up on the inline, in which case the
            // call will typically end up in the statement that contains
            // the GT_RET_EXPR that we leave on the stack.
            impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false);

            // TODO: Still using the widened type.
            GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags);

            // Link the retExpr to the call so if necessary we can manipulate it later.
            origCall->gtInlineCandidateInfo->retExpr = retExpr;

            // Propagate retExpr as the placeholder for the call.
            call = retExpr;
        }
        else
        {
            // If the call is virtual, and has a generics context, and is not going to have a class probe,
            // record the context for possible use during late devirt.
            //
            // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose
            // important devirtualizations, we'll want to allow both a class probe and a captured context.
            //
            if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) &&
                (origCall->gtClassProfileCandidateInfo == nullptr))
            {
                JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall));
                origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT;
                LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo;
                info->exactContextHnd                = exactContextHnd;
                origCall->gtLateDevirtualizationInfo = info;
            }

            if (isFatPointerCandidate)
            {
                // fatPointer candidates should be in statements of the form call() or var = call().
                // Such a form allows us to find statements with fat calls without walking entire trees
                // and avoids problems with cutting trees.
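// Illustrative shapes (pseudocode): "calli(<fat ptr>, args)" as a whole
// statement, or "tmp = calli(<fat ptr>, args)"; anything more complex would
// require splitting trees when the fat-pointer check is expanded later.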
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
//  callInfoCache.uncacheCallInfo();

return callRetTyp;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv)
{
    CorInfoType corType = methInfo->args.retType;

    if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
    {
        // We have some kind of STRUCT being returned
        structPassingKind howToReturnStruct = SPK_Unknown;

        var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct);

        if (howToReturnStruct == SPK_ByReference)
        {
            return true;
        }
    }

    return false;
}

#ifdef DEBUG
//
var_types Compiler::impImportJitTestLabelMark(int numArgs)
{
    TestLabelAndNum tlAndN;
    if (numArgs == 2)
    {
        tlAndN.m_num  = 0;
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else if (numArgs == 3)
    {
        StackEntry se = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        GenTree* val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_num = val->AsIntConCommon()->IconValue();
        se           = impPopStack();
        assert(se.seTypeInfo.GetType() == TI_INT);
        val = se.val;
        assert(val->IsCnsIntOrI());
        tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
    }
    else
    {
        assert(false);
    }

    StackEntry expSe = impPopStack();
    GenTree*   node  = expSe.val;

    // There are a small number of special cases, where we actually put the annotation on a subnode.
    if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
    {
        // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
        // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
        // offset within the static field block whose address is returned by the helper call.
        // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
        assert(node->OperGet() == GT_IND);
        tlAndN.m_num -= 100;
        GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN);
        GetNodeTestData()->Remove(node);
    }
    else
    {
        GetNodeTestData()->Set(node, tlAndN);
    }

    impPushOnStack(node, expSe.seTypeInfo);
    return node->TypeGet();
}
#endif // DEBUG

//-----------------------------------------------------------------------------------
//  impFixupCallStructReturn: For a call node that returns a struct do one of the following:
//  - set the flag to indicate struct return via retbuf arg;
//  - adjust the return type to a SIMD type if it is returned in 1 reg;
//  - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate.
//
// Arguments:
//    call       - GT_CALL GenTree node
//    retClsHnd  - Class handle of return type of the call
//
// Return Value:
//    Returns new GenTree node after fixing struct return of call node
//
GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
{
    if (!varTypeIsStruct(call))
    {
        return call;
    }

    call->gtRetClsHnd = retClsHnd;

#if FEATURE_MULTIREG_RET
    call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv());
    const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
    const unsigned        retRegCount = retTypeDesc->GetReturnRegCount();
#else  // !FEATURE_MULTIREG_RET
    const unsigned retRegCount = 1;
#endif // !FEATURE_MULTIREG_RET

    structPassingKind howToReturnStruct;
    var_types         returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);

    if (howToReturnStruct == SPK_ByReference)
    {
        assert(returnType == TYP_UNKNOWN);
        call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
        return call;
    }

    // Recognize SIMD types as we do for LCL_VARs;
    // note that it might not be the ABI-specific type: for example, on x64 we can set `TYP_SIMD8`
    // for `System.Numerics.Vector2` here, but lowering will change it to long as the ABI dictates.
    var_types simdReturnType = impNormStructType(call->gtRetClsHnd);
    if (simdReturnType != call->TypeGet())
    {
        assert(varTypeIsSIMD(simdReturnType));
        JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()),
                varTypeName(simdReturnType));
        call->ChangeType(simdReturnType);
    }

    if (retRegCount == 1)
    {
        return call;
    }

#if FEATURE_MULTIREG_RET
    assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs.
    assert(returnType == TYP_STRUCT);
    assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue));

#ifdef UNIX_AMD64_ABI

    // must be a struct returned in two registers
    assert(retRegCount == 2);

#else // not UNIX_AMD64_ABI

    assert(retRegCount >= 2);

#endif // not UNIX_AMD64_ABI

    if (!call->CanTailCall() && !call->IsInlineCandidate())
    {
        // Force a call returning multi-reg struct to be always of the IR form
        //   tmp = call
        //
        // No need to assign a multi-reg struct to a local var if:
        //  - It is a tail call or
        //  - The call is marked for in-lining later
        return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv()));
    }
    return call;
#endif // FEATURE_MULTIREG_RET
}

/*****************************************************************************
   For struct return values, re-type the operand in the case where the ABI
   does not use a struct return buffer
 */

//------------------------------------------------------------------------
// impFixupStructReturnType: For struct return values it sets appropriate flags in the multireg-return case;
//    in the non-multireg case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT` and
//    `CORINFO_HELP_UNBOX_NULLABLE`.
//
// Arguments:
//    op - the return value;
//    retClsHnd - the struct handle;
//    unmgdCallConv - the calling convention of the function that returns this struct.
//
// Return Value:
//    the result tree that does the return.
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
//
// Well now we have to materialize the return buffer as
// an address-taken temp. Then we can return the temp.
//
// NOTE: this code assumes that since the call directly
// feeds the return, then the call must be returning the
// same structure/class/type.
//
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));

// No need to spill anything as we're about to return.
impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);

op = gtNewLclvNode(tmpNum, info.compRetType);
JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
DISPTREE(op);
return op;
}

/*****************************************************************************
   CEE_LEAVE may be jumping out of a protected block, viz., a catch or a
   finally-protected try. We find the finally blocks protecting the current
   offset (in order) by walking over the complete exception table and
   finding enclosing clauses. This assumes that the table is sorted.
   This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.

   If we are leaving a catch handler, we need to attach the
   CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.

   After this function, the BBJ_LEAVE block has been converted to a different type.
 */

#if !defined(FEATURE_EH_FUNCLETS)

void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG

    bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned    blkAddr         = block->bbCodeOffs;
    BasicBlock* leaveTarget     = block->bbJumpDest;
    unsigned    jmpAddr         = leaveTarget->bbCodeOffs;

    // LEAVE clears the stack, so spill side effects and set the stack depth to 0
    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;

    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary

    BasicBlock* step         = DUMMY_INIT(NULL);
    unsigned    encFinallies = 0; // Number of enclosing finallies.
    GenTree*    endCatches   = NULL;
    Statement*  endLFinStmt  = NULL; // The statement tree to indicate the end of a locally-invoked finally.

    unsigned  XTnum;
    EHblkDsc* HBtab;

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        // Grab the handler offsets

        IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
        IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
        IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
        IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();

        /* Is this a catch-handler we are CEE_LEAVEing out of?
         * If so, we need to call CORINFO_HELP_ENDCATCH.
         */

        if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
        {
            // Can't CEE_LEAVE out of a finally/fault handler
            if (HBtab->HasFinallyOrFaultHandler())
                BADCODE("leave out of fault/finally block");

            // Create the call to CORINFO_HELP_ENDCATCH
            GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);

            // Make a list of all the currently pending endCatches
            if (endCatches)
                endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
            else
                endCatches = endCatch;

#ifdef DEBUG
            if (verbose)
            {
                printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
                       "CORINFO_HELP_ENDCATCH\n",
                       block->bbNum, XTnum);
            }
#endif
        }
        else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
                 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
        {
            /* This is a finally-protected try we are jumping out of */

            /* If there are any pending endCatches, and we have already
               jumped out of a finally-protected try, then the endCatches
               have to be put in a block in an outer try for async
               exceptions to work correctly.
               Otherwise, just append them to the original block. */

            BasicBlock* callBlock;

            assert(!encFinallies ==
                   !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa

            if (encFinallies == 0)
            {
                assert(step == DUMMY_INIT(NULL));
                callBlock             = block;
                callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY

                if (endCatches)
                    impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
                           "block %s\n",
                           callBlock->dspToString());
                }
#endif
            }
            else
            {
                assert(step != DUMMY_INIT(NULL));

                /* Calling the finally block */
                callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
                assert(step->bbJumpKind == BBJ_ALWAYS);
                step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
                                              // finally in the chain)
                step->bbJumpDest->bbRefs++;

                /* The new block will inherit this block's weight */
                callBlock->inheritWeight(block);

#ifdef DEBUG
                if (verbose)
                {
                    printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
                           callBlock->dspToString());
                }
#endif

                Statement* lastStmt;

                if (endCatches)
                {
                    lastStmt = gtNewStmt(endCatches);
                    endLFinStmt->SetNextStmt(lastStmt);
                    lastStmt->SetPrevStmt(endLFinStmt);
                }
                else
                {
                    lastStmt = endLFinStmt;
                }

                // note that this sets BBF_IMPORTED on the block
                impEndTreeList(callBlock, endLFinStmt, lastStmt);
            }

            step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
            /* The new block will inherit this block's weight */
            step->inheritWeight(block);
            step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;

#ifdef DEBUG
            if (verbose)
            {
                printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
                       step->dspToString());
            }
#endif

            unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
            assert(finallyNesting <= compHndBBtabCount);

            callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
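            // GT_END_LFIN marks the point at which a locally-invoked finally has
            // completed; it records the handler nesting level so the right enclosing
            // finally bookkeeping can be unwound (this is the non-funclet EH model).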
            GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
            endLFinStmt      = gtNewStmt(endLFin);
            endCatches       = NULL;

            encFinallies++;

            invalidatePreds = true;
        }
    }

    /* Append any remaining endCatches, if any */

    assert(!encFinallies == !endLFinStmt);

    if (encFinallies == 0)
    {
        assert(step == DUMMY_INIT(NULL));
        block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS

        if (endCatches)
            impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);

#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
                   "block %s\n",
                   block->dspToString());
        }
#endif
    }
    else
    {
        // If leaveTarget is the start of another try block, we want to make sure that
        // we do not insert finalStep into that try block. Hence, we find the enclosing
        // try block.
        unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);

        // Insert a new BB either in the try region indicated by tryIndex or
        // the handler region indicated by leaveTarget->bbHndIndex,
        // depending on which is the inner region.
        BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
        finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
        step->bbJumpDest = finalStep;

        /* The new block will inherit this block's weight */
        finalStep->inheritWeight(block);

#ifdef DEBUG
        if (verbose)
        {
            printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
                   finalStep->dspToString());
        }
#endif

        Statement* lastStmt;

        if (endCatches)
        {
            lastStmt = gtNewStmt(endCatches);
            endLFinStmt->SetNextStmt(lastStmt);
            lastStmt->SetPrevStmt(endLFinStmt);
        }
        else
        {
            lastStmt = endLFinStmt;
        }

        impEndTreeList(finalStep, endLFinStmt, lastStmt);

        finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE

        // Queue up the jump target for importing

        impImportBlockPending(leaveTarget);

        invalidatePreds = true;
    }

    if (invalidatePreds && fgComputePredsDone)
    {
        JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
        fgRemovePreds();
    }

#ifdef DEBUG
    fgVerifyHandlerTab();

    if (verbose)
    {
        printf("\nAfter import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG
}

#else // FEATURE_EH_FUNCLETS

void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE in " FMT_BB " (targeting " FMT_BB "):\n", block->bbNum,
               block->bbJumpDest->bbNum);
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG

    bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned    blkAddr         = block->bbCodeOffs;
    BasicBlock* leaveTarget     = block->bbJumpDest;
    unsigned    jmpAddr         = leaveTarget->bbCodeOffs;

    // LEAVE clears the stack, so spill side effects and set the stack depth to 0
    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;

    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary

    BasicBlock* step = nullptr;

    enum StepType
    {
        // No step type; step == NULL.
        ST_None,

        // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
        // That is, is step->bbJumpDest where a finally will return to?
        ST_FinallyReturn,

        // The step block is a catch return.
        ST_Catch,

        // The step block is in a "try", created as the target for a finally return or the target for a catch return.
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the
// only predecessor are also considered orphans and attempted to be deleted.
//
//  try  {
//     ....
//     try
//     {
//         ....
//         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
//     } finally { }
//  } finally { }
//  OUTSIDE:
//
// In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a block
// where a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the step
// block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be
// removed. To work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as
// BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block
// deletion B0Dup and B1 will be treated as a pair and handled correctly.
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
    BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
    dupBlock->bbFlags    = block->bbFlags;
    dupBlock->bbJumpDest = block->bbJumpDest;
    dupBlock->copyEHRegion(block);
    dupBlock->bbCatchTyp = block->bbCatchTyp;

    // Mark this block as
    //  a) not referenced by any other block to make sure that it gets deleted
    //  b) weight zero
    //  c) prevent from being imported
    //  d) as internal
    //  e) as rarely run
    dupBlock->bbRefs   = 0;
    dupBlock->bbWeight = BB_ZERO_WEIGHT;
    dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;

    // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
    // will be next to each other.
    fgInsertBBafter(block, dupBlock);

#ifdef DEBUG
    if (verbose)
    {
        printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
    }
#endif
}
#endif // FEATURE_EH_FUNCLETS

block->bbJumpKind = BBJ_LEAVE;
fgInitBBLookup();
block->bbJumpDest = fgLookupBB(jmpAddr);

// We will leave the BBJ_ALWAYS block we introduced. When it's reimported
// the BBJ_ALWAYS block will be unreachable, and will be removed after. The
// reason we don't want to remove the block at this point is that if we call
// fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
// added and the linked list length will be different than fgBBcount.
}

/*****************************************************************************/
// Get the first non-prefix opcode. Used for verification of valid combinations
// of prefixes and actual opcodes.

OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
{
    while (codeAddr < codeEndp)
    {
        OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
        codeAddr += sizeof(__int8);

        if (opcode == CEE_PREFIX1)
        {
            if (codeAddr >= codeEndp)
            {
                break;
            }
            opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
            codeAddr += sizeof(__int8);
        }

        switch (opcode)
        {
            case CEE_UNALIGNED:
            case CEE_VOLATILE:
            case CEE_TAILCALL:
            case CEE_CONSTRAINED:
            case CEE_READONLY:
                break;
            default:
                return opcode;
        }

        codeAddr += opcodeSizes[opcode];
    }

    return CEE_ILLEGAL;
}

/*****************************************************************************/
// Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes

void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
{
    OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);

    if (!(
            // The opcodes of all the ldind and stind instructions happen to be contiguous, except stind.i.
            ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
            (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
            (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
            // volatile. prefix is allowed with the ldsfld and stsfld
            (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
    {
        BADCODE("Invalid opcode for unaligned. or volatile. prefix");
    }
}

/*****************************************************************************/

#ifdef DEBUG

#undef RETURN // undef contracts RETURN macro

enum controlFlow_t
{
    NEXT,
    CALL,
    RETURN,
    THROW,
    BRANCH,
    COND_BRANCH,
    BREAK,
    PHI,
    META,
};

const static controlFlow_t controlFlow[] = {
#define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
#include "opcode.def"
#undef OPDEF
};

#endif // DEBUG

/*****************************************************************************
 *  Determine the result type of an arithmetic operation
 *  On 64-bit inserts upcasts when native int is mixed with int32
 */
var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
{
    var_types type = TYP_UNDEF;
    GenTree*  op1  = *pOp1;
    GenTree*  op2  = *pOp2;

    // Arithmetic operations are generally only allowed with
    // primitive types, but certain operations are allowed
    // with byrefs

    if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // byref1-byref2 => gives a native int
            type = TYP_I_IMPL;
        }
        else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
        {
            // [native] int - byref => gives a native int

            //
            // The reason is that it is possible, in managed C++,
            // to have a tree like this:
            //
            //              -
            //             / \.
            //            /   \.
            //           /     \.
            //          /       \.
            // const(h) int     addr byref
            //
            // <BUGNUM> VSW 318822 </BUGNUM>
            //
            // So here we decide to make the resulting type to be a native int.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT

            type = TYP_I_IMPL;
        }
        else
        {
            // byref - [native] int => gives a byref
            assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));

#ifdef TARGET_64BIT
            if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
            {
                // insert an explicit upcast
                op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
#endif // TARGET_64BIT

            type = TYP_BYREF;
        }
    }
    else if ((oper == GT_ADD) &&
             (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
    {
        // byref + [native] int => gives a byref
        // (or)
        // [native] int + byref => gives a byref

        // only one can be a byref : byref op byref not allowed
        assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
        assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));

#ifdef TARGET_64BIT
        if (genActualType(op2->TypeGet()) == TYP_BYREF)
        {
            if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
            {
                // insert an explicit upcast
                op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
            }
        }
        else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
        {
            // insert an explicit upcast
            op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ?
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have // an exact type (that is, fromClass is not a subtype) // and we're not going to throw on failure. if (isExact && !isCastClass) { JITDUMP("Cast will fail, optimizing to return null\n"); GenTree* result = gtNewIconNode(0, TYP_REF); // If the cast was fed by a box, we can remove that too. if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when the jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method is already pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1)) { // Optimizations are enabled but we're still instrumenting (including casts) if (isCastClass && !impIsClassExact(pResolvedToken->hClass)) { // Usually, we make a speculative assumption that it makes sense to expand castclass // even for non-sealed classes, but let's rely on PGO in this specific case shouldExpandInline = false; } } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all castclass/isinst operations can be inline expanded. // Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ?
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized, as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // We only need to add patchpoints if the method can loop. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // By default we use the "adaptive" strategy. // // This can create both source and target patchpoints within a given // loop structure, which isn't ideal, but is not incorrect. We will // just have some extra Tier0 overhead. 
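// For illustration (a sketch; exact placement depends on the flags computed
// below): with the adaptive strategy, a simple loop such as
//
//     for (int i = 0; i < n; i++) { ... }
//
// typically gets one patchpoint at the backedge source block, while a target
// with several incoming backedges gets a single patchpoint at the target
// instead.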
// // Todo: implement support for mid-block patchpoints. If `block` // is truly a backedge source (and not in a handler) then we should be // able to find a stack empty point somewhere in the block. // const int patchpointStrategy = JitConfig.TC_PatchpointStrategy(); bool addPatchpoint = false; bool mustUseTargetPatchpoint = false; switch (patchpointStrategy) { default: { // Patchpoints at backedge sources, if possible, otherwise targets. // addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE); mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); break; } case 1: { // Patchpoints at stackempty backedge targets. // Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint // them. // // We should not have allowed OSR if there were backedges in handlers. // assert(!block->hasHndIndex()); addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) && (verCurrentState.esStackDepth == 0); break; } case 2: { // Adaptive strategy. // // Patchpoints at backedge targets if there are multiple backedges, // otherwise at backedge sources, if possible. Note a block can be both; if so we // just need one patchpoint. // if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) { // We don't know backedge count, so just use ref count. // addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0); } if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE)) { addPatchpoint = true; mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); // Also force target patchpoint if target block has multiple (backedge) preds. // if (!mustUseTargetPatchpoint) { for (BasicBlock* const succBlock : block->Succs(this)) { if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1)) { mustUseTargetPatchpoint = true; break; } } } } break; } } if (addPatchpoint) { if (mustUseTargetPatchpoint) { // We wanted a source patchpoint, but could not have one. // So, add patchpoints to the backedge targets. // for (BasicBlock* const succBlock : block->Succs(this)) { if (succBlock->bbNum <= block->bbNum) { // The succBlock had better agree it's a target. // assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET); // We may already have decided to put a patchpoint in succBlock. If not, add one. // if ((succBlock->bbFlags & BBF_PATCHPOINT) != 0) { // In some cases the target may not be stack-empty at entry. // If so, we will bypass patchpoints for this backedge. // if (succBlock->bbStackDepthOnEntry() > 0) { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB " as it has non-empty stack on entry.\n", block->bbNum, succBlock->bbNum); } else { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB " instead\n", block->bbNum, succBlock->bbNum); assert(!succBlock->hasHndIndex()); succBlock->bbFlags |= BBF_PATCHPOINT; } } } } } else { assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; } setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches. // So if !compHasBackwardsBranch, these flags should never be set. // assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. 
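// For illustration (a sketch of the knobs read below): setting
// JitOffsetOnStackReplacement to an IL offset forces a patchpoint at the
// block starting at that offset, while JitRandomOnStackReplacement=N gives
// each eligible block roughly an N% chance of getting one.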
// // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
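// For illustration (hypothetical C# source, not from this file): in a call
// like
//
//     Use(s, s = MakeS());
//
// the first load of struct local 's' is still on the evaluation stack when
// the store to 's' is appended, so that load must be spilled to a temp first
// to keep it observing the old value.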
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
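// For illustration: with a non-empty entry stack the stored value may have
// been produced on another path (e.g. both arms of 'cond ? a : b' flowing
// into this store), so the class seen here need not be the only reaching
// definition; requiring an empty entry stack sidesteps that (a sketch of
// the reasoning, not an exhaustive justification).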
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
*/ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count because this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours.
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
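// For illustration (IL sketch): 'ldelem.u4' and 'ldelem.i4' push the same
// 4-byte stack value; signedness only becomes observable at an explicit
// widening the IL encodes separately, e.g. 'conv.u8' versus 'conv.i8'.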
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
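// For illustration (hypothetical C# source, not from this file):
//
//     object[] a = new string[n];
//     a[i] = someObject;      // needs the covariant store check (helper call)
//
//     string[] s = new string[n];
//     s[i] = someString;      // exact element type match, check can be skipped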
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluating the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter.
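// For illustration (a sketch; the exact set is platform-dependent): on
// 32-bit targets a 'long / long' division is later morphed into a helper
// call such as CORINFO_HELP_LDIV, and floating-point 'rem' becomes a
// fmod-style helper, so the node allocated here must be big enough to be
// rewritten into a GT_CALL in place.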
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
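// For illustration (a sketch): an IL sequence like 'ldc.i4.2; ldc.i4.1; cgt'
// reaches this point as GT_GT(2, 1) and folds to a constant 1 here, so no
// comparison survives to codegen.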
op1 = gtFoldExpr(op1);

impPushOnStack(op1, tiRetVal);
break;

case CEE_BEQ_S:
case CEE_BEQ:
    oper = GT_EQ;
    goto CMP_2_OPs_AND_BR;

case CEE_BGE_S:
case CEE_BGE:
    oper = GT_GE;
    goto CMP_2_OPs_AND_BR;

case CEE_BGE_UN_S:
case CEE_BGE_UN:
    oper = GT_GE;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BGT_S:
case CEE_BGT:
    oper = GT_GT;
    goto CMP_2_OPs_AND_BR;

case CEE_BGT_UN_S:
case CEE_BGT_UN:
    oper = GT_GT;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BLE_S:
case CEE_BLE:
    oper = GT_LE;
    goto CMP_2_OPs_AND_BR;

case CEE_BLE_UN_S:
case CEE_BLE_UN:
    oper = GT_LE;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BLT_S:
case CEE_BLT:
    oper = GT_LT;
    goto CMP_2_OPs_AND_BR;

case CEE_BLT_UN_S:
case CEE_BLT_UN:
    oper = GT_LT;
    goto CMP_2_OPs_AND_BR_UN;

case CEE_BNE_UN_S:
case CEE_BNE_UN:
    oper = GT_NE;
    goto CMP_2_OPs_AND_BR_UN;

CMP_2_OPs_AND_BR_UN:
    uns       = true;
    unordered = true;
    goto CMP_2_OPs_AND_BR_ALL;

CMP_2_OPs_AND_BR:
    uns       = false;
    unordered = false;
    goto CMP_2_OPs_AND_BR_ALL;

CMP_2_OPs_AND_BR_ALL:
    /* Pull two values */
    op2 = impPopStack().val;
    op1 = impPopStack().val;

#ifdef TARGET_64BIT
    if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
    {
        op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
    }
    else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
    {
        op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
    }
#endif // TARGET_64BIT

    assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
              (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) ||
              (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));

    if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext))
    {
        block->bbJumpKind = BBJ_NONE;

        if (op1->gtFlags & GTF_GLOB_EFFECT)
        {
            impSpillSideEffects(false,
                                (unsigned)CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op1 side effect"));
            impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
        }
        if (op2->gtFlags & GTF_GLOB_EFFECT)
        {
            impSpillSideEffects(false,
                                (unsigned)CHECK_SPILL_ALL DEBUGARG("Branch to next Optimization, op2 side effect"));
            impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
        }

#ifdef DEBUG
        if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
        {
            impNoteLastILoffs();
        }
#endif
        break;
    }

    // We can generate a compare of different sized floating point op1 and op2
    // We insert a cast
    //
    if (varTypeIsFloating(op1->TypeGet()))
    {
        if (op1->TypeGet() != op2->TypeGet())
        {
            assert(varTypeIsFloating(op2->TypeGet()));

            // say op1=double, op2=float. To avoid loss of precision
            // while comparing, op2 is converted to double and double
            // comparison is done.
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. 
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of
// implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
// The cast gets added as part of importing GT_CALL, which gets in the way
// of fgMorphCall() on the forms of tail call nodes that we assert.
if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
{
    op1 = op1->AsOp()->gtOp1;
}

if (op1->gtOper != GT_CALL)
{
    if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0)
    {
        op1 = gtUnusedValNode(op1);
    }
    else
    {
        // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`;
        // if we ever need to reimport we need a valid LCL_VAR on it.
        op1 = gtNewNothingNode();
    }
}

/* Append the value to the tree list */
goto SPILL_APPEND;
}

/* No side effects - just throw the <BEEP> thing away */
}
break;

case CEE_DUP:
{
    StackEntry se   = impPopStack();
    GenTree*   tree = se.val;
    tiRetVal        = se.seTypeInfo;
    op1             = tree;

    // If the expression to dup is simple, just clone it.
    // Otherwise spill it to a temp, and reload the temp twice.
    bool cloneExpr = false;

    if (!opts.compDbgCode)
    {
        // Duplicate 0 and +0.0
        if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero())
        {
            cloneExpr = true;
        }
        // Duplicate locals and addresses of them
        else if (op1->IsLocal())
        {
            cloneExpr = true;
        }
        else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() &&
                 (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ)
        {
            cloneExpr = true;
        }
    }
    else
    {
        // Always clone for debug mode
        cloneExpr = true;
    }

    if (!cloneExpr)
    {
        const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
        impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
        var_types type = genActualType(lvaTable[tmpNum].TypeGet());
        op1            = gtNewLclvNode(tmpNum, type);

        // Propagate type info to the temp from the stack and the original tree
        if (type == TYP_REF)
        {
            assert(lvaTable[tmpNum].lvSingleDef == 0);
            lvaTable[tmpNum].lvSingleDef = 1;
            JITDUMP("Marked V%02u as a single def local\n", tmpNum);
            lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
        }
    }

    op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
                       nullptr DEBUGARG("DUP instruction"));

    assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
    impPushOnStack(op1, tiRetVal);
    impPushOnStack(op2, tiRetVal);
}
break;

case CEE_STIND_I1:
    lclTyp = TYP_BYTE;
    goto STIND;
case CEE_STIND_I2:
    lclTyp = TYP_SHORT;
    goto STIND;
case CEE_STIND_I4:
    lclTyp = TYP_INT;
    goto STIND;
case CEE_STIND_I8:
    lclTyp = TYP_LONG;
    goto STIND;
case CEE_STIND_I:
    lclTyp = TYP_I_IMPL;
    goto STIND;
case CEE_STIND_REF:
    lclTyp = TYP_REF;
    goto STIND;
case CEE_STIND_R4:
    lclTyp = TYP_FLOAT;
    goto STIND;
case CEE_STIND_R8:
    lclTyp = TYP_DOUBLE;
    goto STIND;
STIND:
    op2 = impPopStack().val; // value to store
    op1 = impPopStack().val; // address to store to

    // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
    assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);

    impBashVarAddrsToI(op1, op2);

    op2 = impImplicitR4orR8Cast(op2, lclTyp);

#ifdef TARGET_64BIT
    // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
    if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
    {
        op2->gtType = TYP_I_IMPL;
    }
    else
    {
        // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
        //
        if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
        {
            op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
        }
        // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
        //
        if
(varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. 
// Note: We cannot add a peep to avoid use of temp here // becase we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: We find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. // bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local. 
// Don't need to spill at all as we are just calling an EE-Jit helper
// which can only cause an (async) OutOfMemoryException.

// We assign the newly allocated object (by a GT_ALLOCOBJ node)
// to a temp. Note that the pattern "temp = allocObj" is required
// by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
// without exhaustive walk over all expressions.

impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);

assert(lvaTable[lclNum].lvSingleDef == 0);
lvaTable[lclNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def local\n", lclNum);
lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);

newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
}
}
goto CALL;

case CEE_CALLI:
    /* CALLI does not respond to CONSTRAINED */
    prefixFlags &= ~PREFIX_CONSTRAINED;

    FALLTHROUGH;

case CEE_CALLVIRT:
case CEE_CALL:
    // We can't call getCallInfo on the token from a CALLI, but we need it in
    // many other places. We unfortunately embed that knowledge here.
    if (opcode != CEE_CALLI)
    {
        _impResolveToken(CORINFO_TOKENKIND_Method);

        eeGetCallInfo(&resolvedToken,
                      (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
                      // this is how impImportCall invokes getCallInfo
                      combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
                              (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE),
                      &callInfo);
    }
    else
    {
        // Suppress uninitialized use warning.
        memset(&resolvedToken, 0, sizeof(resolvedToken));
        memset(&callInfo, 0, sizeof(callInfo));

        resolvedToken.token        = getU4LittleEndian(codeAddr);
        resolvedToken.tokenContext = impTokenLookupContextHandle;
        resolvedToken.tokenScope   = info.compScopeHnd;
    }

CALL: // memberRef should be set.
    // newObjThisPtr should be set for CEE_NEWOBJ

    JITDUMP(" %08X", resolvedToken.token);
    constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;

    bool newBBcreatedForTailcallStress;
    bool passedStressModeValidation;

    newBBcreatedForTailcallStress = false;
    passedStressModeValidation    = true;

    if (compIsForInlining())
    {
        if (compDonotInline())
        {
            return;
        }
        // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
        assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
    }
    else
    {
        if (compTailCallStress())
        {
            // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
            // Tail call stress only recognizes call+ret patterns and forces them to be
            // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
            // doesn't import the 'ret' opcode following the call into the basic block containing
            // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
            // is already checking that there is an opcode following call and hence it is
            // safe here to read next opcode without bounds check.
            newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) &&
                                            // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
                                            // make it jump to RET.
                                            (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET

            bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
            if (newBBcreatedForTailcallStress && !hasTailPrefix)
            {
                // Do a more detailed evaluation of legality
                const bool returnFalseIfInvalid = true;
                const bool passedConstraintCheck =
                    verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ?
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
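// An explicit tail call (or one forced by tail call stress) never falls
// through: the recognized IL shape is a call immediately followed by ret,
// e.g.
//     call void Foo::Bar()
//     ret
// so importation of this block finishes through the RET path below.
// (Foo::Bar is an illustrative name, not from this code.)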
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
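// (Presumably impGetStructAddr below may spill the value to a temp in
// order to produce an addressable location for the field access.)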
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0) op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? 
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
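// Struct-typed stores cannot be expressed as a simple assignment at this
// point; they are deferred and routed through impAssignStruct after the
// spills appended below.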
bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... ???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
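// ECMA-335 requires the evaluation stack to hold nothing but the size
// operand (popped above) at a localloc; e.g. the IL sequence
//     ldc.i4 32
//     localloc
// is valid only when nothing else is live on the stack, which the check
// below enforces.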
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
consumed both as `TYP_STRUCT` and `TYP_REF`. op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. 
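// Illustrative example (not compiler code, and assuming gtGetClassHandle can
// recover the exact class of op1): C# such as
//
//     object boxed = 42;        // op1 is provably a boxed System.Int32
//     int    value = (int)boxed;
//
// reaches this point with compare == TypeCompareState::Must, so the expansion
// below reduces to a null check plus a byref that skips the method table
// pointer (op1 + TARGET_POINTER_SIZE) into the box payload -- no helper call.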
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
);

    /* ----------------------------------------------------------------------
       | \ helper  |                         |                              |
       |   \       |                         |                              |
       |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
       |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
       | opcode  \ |                         |                              |
       |---------------------------------------------------------------------
       | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
       |           |                         | push the BYREF to this local |
       |---------------------------------------------------------------------
       | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
       |           | the BYREF               | For Linux when the           |
       |           |                         | struct is returned in two    |
       |           |                         | registers create a temp      |
       |           |                         | whose address is passed to   |
       |           |                         | the unbox_nullable helper.   |
       |---------------------------------------------------------------------
     */

    if (opcode == CEE_UNBOX)
    {
        if (helper == CORINFO_HELP_UNBOX_NULLABLE)
        {
            // Unbox nullable helper returns a struct type.
            // We need to spill it to a temp so that we can take the address of it.
            // Here we need the unsafe value cls check, since the address of the struct
            // is taken to be used further along and could potentially be exploited.
            unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
            lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);

            op2 = gtNewLclvNode(tmp, TYP_STRUCT);
            op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
            assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.

            op2 = gtNewLclvNode(tmp, TYP_STRUCT);
            op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
            op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
        }

        assert(op1->gtType == TYP_BYREF);
    }
    else
    {
        assert(opcode == CEE_UNBOX_ANY);

        if (helper == CORINFO_HELP_UNBOX)
        {
            // Normal unbox helper returns a TYP_BYREF.
            impPushOnStack(op1, tiRetVal);
            oper = GT_OBJ;
            goto OBJ;
        }

        assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");

#if FEATURE_MULTIREG_RET

        if (varTypeIsStruct(op1) &&
            IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed))
        {
            // Unbox nullable helper returns a TYP_STRUCT.
            // For the multi-reg case we need to spill it to a temp so that
            // we can pass the address to the unbox_nullable jit helper.
            unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
            lvaTable[tmp].lvIsMultiRegArg = true;
            lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);

            op2 = gtNewLclvNode(tmp, TYP_STRUCT);
            op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
            assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.

            op2 = gtNewLclvNode(tmp, TYP_STRUCT);
            op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
            op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);

            // In this case the return value of the unbox helper is TYP_BYREF.
            // Make sure the right type is placed on the operand type stack.
            impPushOnStack(op1, tiRetVal);

            // Load the struct.
            oper = GT_OBJ;

            assert(op1->gtType == TYP_BYREF);
            goto OBJ;
        }
        else

#endif // FEATURE_MULTIREG_RET

        {
            // If the struct is not returned in registers, it has been materialized in the RetBuf.
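            // (Concrete instance of the CORINFO_HELP_UNBOX_NULLABLE column above, as an
            // illustration: for C# "int? n = (int?)boxed;" the helper produces a
            // Nullable<int> struct; when that struct is not multi-reg returned it is
            // materialized through the return buffer, which is the case handled here.)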
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
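// (For illustration: the IL generated for C# "throw new ArgumentException();" ends
// a block with CEE_THROW; marking the block run-rarely steers block layout and the
// register allocator away from this exceptional path.)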
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if 
(impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) 
{ compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)) { var_types lclTyp; if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)); } // Load a local/argument on the operand stack // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal) { impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal); } // Load an argument on the operand stack // Shared by the various CEE_LDARG opcodes // ilArgNum is the argument index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { Verify(ilArgNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { if (ilArgNum >= info.compArgsCount) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER); return; } impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo), impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo); } else { if (ilArgNum >= info.compArgsCount) { BADCODE("Bad IL"); } unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } impLoadVar(lclNum, offset); } } // Load a local on the operand stack // Shared by the various CEE_LDLOC opcodes // ilLclNum is the local index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) { if (compIsForInlining()) { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER); return; } // Get the local type var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo; typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo; /* Have we allocated a temp for this local? 
*/
        unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));

        // All vars of inlined methods should be !lvNormalizeOnLoad()
        assert(!lvaTable[lclNum].lvNormalizeOnLoad());
        lclTyp = genActualType(lclTyp);

        impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
    }
    else
    {
        if (ilLclNum >= info.compMethodInfo->locals.numArgs)
        {
            BADCODE("Bad IL");
        }

        unsigned lclNum = info.compArgsCount + ilLclNum;

        impLoadVar(lclNum, offset);
    }
}

#ifdef TARGET_ARM
/**************************************************************************************
 *
 * When assigning a vararg call src to an HFA lcl dest, mark that we cannot promote the
 * dst struct, because struct promotion will turn it into a float/double variable while
 * the rhs will be an int/long variable. We don't generate code that assigns an int into
 * a float, but there is nothing that would prevent us from doing so. The tree however
 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
 *
 * tmpNum - the lcl dst variable num that is a struct.
 * src    - the src tree assigned to the dest that is a struct/int (when it is a varargs call.)
 * hClass - the type handle for the struct variable.
 *
 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
 *   however, we could do a codegen of transferring from int to float registers
 *   (a transfer, not a cast.)
 *
 */
void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
{
    if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass))
    {
        int       hfaSlots = GetHfaCount(hClass);
        var_types hfaType  = GetHfaType(hClass);

        // If we have varargs, the importer morphs the method's return type to be "int"
        // irrespective of its original struct/float type, because the ABI calls for the
        // return to be in integer registers.
        // We don't want struct promotion to replace an expression like this:
        //   lclFld_int = callvar_int()
        // into
        //   lclFld_float = callvar_int();
        // This means an int is getting assigned to a float without a cast. Prevent the promotion.
        if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
            (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
        {
            // Make sure this struct type stays as struct so we can receive the call in a struct.
            lvaTable[tmpNum].lvIsMultiRegRet = true;
        }
    }
}
#endif // TARGET_ARM

#if FEATURE_MULTIREG_RET
//------------------------------------------------------------------------
// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
//   registers return their values to suitable temps.
//
// Arguments:
//    op -- call returning a struct in registers
//    hClass -- class handle for struct
//
// Returns:
//    Tree with reference to struct local to use as call return value.

GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree*             op,
                                              CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv))
{
    unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return"));
    impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
    GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);

    // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
    ret->gtFlags |= GTF_DONT_CSE;

    assert(IsMultiRegReturnedType(hClass, callConv));

    // Mark the var so that fields are not promoted and stay together.
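    // (Illustrative rationale: if promotion split this temp into per-field locals,
    // a value handed back in multiple registers -- e.g. a 16-byte struct returned
    // in RAX:RDX under the SysV x64 ABI -- would have no single home to land in.)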
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
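// (Example of the asymmetry, for illustration: an inlinee declared to return IntPtr
// may hand back a TYP_BYREF tree via unsafe pointer code, which is safe to treat as
// TYP_I_IMPL; manufacturing a TYP_REF from a native int would not be, so that
// direction is rejected below.)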
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
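// (Sketch of the invariant being asserted: in an inlinee with several return blocks,
// e.g. C# "if (c) return a; return b;", every CEE_RET stores into the single
// lvaInlineeReturnSpillTemp, so each block's retExpr must be that same GT_LCL_VAR.)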
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
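// (Illustration: with FEATURE_MULTIREG_RET, a C# "struct Pair { long A, B; }" returned
// by value stays TYP_STRUCT here and is split across two return registers later; on
// targets without it, any such struct must already have been retyped, which is what
// the assert below enforces.)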
noway_assert(info.compRetNativeType != TYP_STRUCT); #endif op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); // return op2 var_types returnType = info.compRetType; op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2); } else { // return op2 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2); } // We must have imported a tailcall and jumped to RET if (isTailCall) { assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES // impImportCall() would have already appended TYP_VOID calls if (info.compRetType == TYP_VOID) { return true; } } impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif return true; } /***************************************************************************** * Mark the block as unimported. * Note that the caller is responsible for calling impImportBlockPending(), * with the appropriate stack-state */ inline void Compiler::impReimportMarkBlock(BasicBlock* block) { #ifdef DEBUG if (verbose && (block->bbFlags & BBF_IMPORTED)) { printf("\n" FMT_BB " will be reimported\n", block->bbNum); } #endif block->bbFlags &= ~BBF_IMPORTED; } /***************************************************************************** * Mark the successors of the given block as unimported. * Note that the caller is responsible for calling impImportBlockPending() * for all the successors, with the appropriate stack-state. */ void Compiler::impReimportMarkSuccessors(BasicBlock* block) { for (BasicBlock* const succBlock : block->Succs()) { impReimportMarkBlock(succBlock); } } /***************************************************************************** * * Filter wrapper to handle only passed in exception code * from it). */ LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam) { if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION) { return EXCEPTION_EXECUTE_HANDLER; } return EXCEPTION_CONTINUE_SEARCH; } void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart) { assert(block->hasTryIndex()); assert(!compIsForInlining()); unsigned tryIndex = block->getTryIndex(); EHblkDsc* HBtab = ehGetDsc(tryIndex); if (isTryStart) { assert(block->bbFlags & BBF_TRY_BEG); // The Stack must be empty // if (block->bbStkDepth != 0) { BADCODE("Evaluation stack must be empty on entry into a try block"); } } // Save the stack contents, we'll need to restore it later // SavedStack blockState; impSaveStackState(&blockState, false); while (HBtab != nullptr) { if (isTryStart) { // Are we verifying that an instance constructor properly initializes it's 'this' pointer once? // We do not allow the 'this' pointer to be uninitialized when entering most kinds try regions // if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init)) { // We trigger an invalid program exception here unless we have a try/fault region. // if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter()) { BADCODE( "The 'this' pointer of an instance constructor is not intialized upon entry to a try region"); } else { // Allow a try/fault region to proceed. assert(HBtab->HasFaultHandler()); } } } // Recursively process the handler block, if we haven't already done so. 
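// (For C# "try { ... } catch (Exception e) { ... }", the catch handler's entry block
// is imported with exactly one stack item -- the caught exception object -- which the
// handlerGetsXcptnObj path below pushes, spilling it to a temp if necessary.)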
BasicBlock* hndBegBB = HBtab->ebdHndBeg; if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0)) { // Construct the proper verification stack state // either empty or one that contains just // the Exception Object that we are dealing with // verCurrentState.esStackDepth = 0; if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp)) { CORINFO_CLASS_HANDLE clsHnd; if (HBtab->HasFilter()) { clsHnd = impGetObjectClass(); } else { CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = HBtab->ebdTyp; resolvedToken.tokenType = CORINFO_TOKENKIND_Class; info.compCompHnd->resolveToken(&resolvedToken); clsHnd = resolvedToken.hClass; } // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdHndBeg! hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false); } // Queue up the handler for importing // impImportBlockPending(hndBegBB); } // Process the filter block, if we haven't already done so. if (HBtab->HasFilter()) { /* @VERIFICATION : Ideally the end of filter state should get propagated to the catch handler, this is an incompleteness, but is not a security/compliance issue, since the only interesting state is the 'thisInit' state. */ BasicBlock* filterBB = HBtab->ebdFilter; if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0)) { verCurrentState.esStackDepth = 0; // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB); filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter); impImportBlockPending(filterBB); } } // This seems redundant ....?? if (verTrackObjCtorInitState && HBtab->HasFaultHandler()) { /* Recursively process the handler block */ verCurrentState.esStackDepth = 0; // Queue up the fault handler for importing // impImportBlockPending(HBtab->ebdHndBeg); } // Now process our enclosing try index (if any) // tryIndex = HBtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { HBtab = nullptr; } else { HBtab = ehGetDsc(tryIndex); } } // Restore the stack contents impRestoreStackState(&blockState); } //*************************************************************** // Import the instructions for the given basic block. Perform // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first // time, or whose verification pre-state is changed. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif void Compiler::impImportBlock(BasicBlock* block) { // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to // handle them specially. In particular, there is no IL to import for them, but we do need // to mark them as imported and put their successors on the pending import list. 
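// (BBF_INTERNAL blocks are a by-product of EH canonicalization -- for example the step
// blocks created around a finally region -- so there is no IL to import for them; they
// are simply marked imported and their successors queued, as done below.)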
if (block->bbFlags & BBF_INTERNAL) { JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum); block->bbFlags |= BBF_IMPORTED; for (BasicBlock* const succBlock : block->Succs()) { impImportBlockPending(succBlock); } return; } bool markImport; assert(block); /* Make the block globaly available */ compCurBB = block; #ifdef DEBUG /* Initialize the debug variables */ impCurOpcName = "unknown"; impCurOpcOffs = block->bbCodeOffs; #endif /* Set the current stack state to the merged result */ verResetCurrentState(block, &verCurrentState); /* Now walk the code and import the IL into GenTrees */ struct FilterVerificationExceptionsParam { Compiler* pThis; BasicBlock* block; }; FilterVerificationExceptionsParam param; param.pThis = this; param.block = block; PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param) { /* @VERIFICATION : For now, the only state propagation from try to it's handler is "thisInit" state (stack is empty at start of try). In general, for state that we track in verification, we need to model the possibility that an exception might happen at any IL instruction, so we really need to merge all states that obtain between IL instructions in a try block into the start states of all handlers. However we do not allow the 'this' pointer to be uninitialized when entering most kinds try regions (only try/fault are allowed to have an uninitialized this pointer on entry to the try) Fortunately, the stack is thrown away when an exception leads to a handler, so we don't have to worry about that. We DO, however, have to worry about the "thisInit" state. But only for the try/fault case. The only allowed transition is from TIS_Uninit to TIS_Init. So for a try/fault region for the fault handler block we will merge the start state of the try begin and the post-state of each block that is part of this try region */ // merge the start state of the try begin // if (pParam->block->bbFlags & BBF_TRY_BEG) { pParam->pThis->impVerifyEHBlock(pParam->block, true); } pParam->pThis->impImportBlockCode(pParam->block); // As discussed above: // merge the post-state of each block that is part of this try region // if (pParam->block->hasTryIndex()) { pParam->pThis->impVerifyEHBlock(pParam->block, false); } } PAL_EXCEPT_FILTER(FilterVerificationExceptions) { verHandleVerificationFailure(block DEBUGARG(false)); } PAL_ENDTRY if (compDonotInline()) { return; } assert(!compDonotInline()); markImport = false; SPILLSTACK: unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks bool reimportSpillClique = false; BasicBlock* tgtBlock = nullptr; /* If the stack is non-empty, we might have to spill its contents */ if (verCurrentState.esStackDepth != 0) { impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something // on the stack, its lifetime is hard to determine, simply // don't reuse such temps. Statement* addStmt = nullptr; /* Do the successors of 'block' have any other predecessors ? We do not want to do some of the optimizations related to multiRef if we can reimport blocks */ unsigned multRef = impCanReimport ? unsigned(~0) : 0; switch (block->bbJumpKind) { case BBJ_COND: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_JTRUE); /* Note if the next block has more than one ancestor */ multRef |= block->bbNext->bbRefs; /* Does the next block have temps assigned? 
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code. 
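// (Hypothetical trigger for this failure path: a join point where one predecessor
// leaves a value that the chosen spill-temp type cannot legally hold; the spill fails
// and, unless a reimport was already requested, the method is rejected as bad code.)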
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had a int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. 
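// (Example of the invalid IL this rejects: one predecessor ends "ldc.i4.0; br L" while
// another ends with a bare "br L" -- block L would be entered with stack depths 1 and
// 0, and the check below reports that as bad code.)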
if (block->bbStkDepth != verCurrentState.esStackDepth)
        {
#ifdef DEBUG
            char buffer[400];
            sprintf_s(buffer, sizeof(buffer),
                      "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n"
                      "Previous depth was %d, current depth is %d",
                      block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
                      verCurrentState.esStackDepth);
            buffer[400 - 1] = 0;
            NO_WAY(buffer);
#else
            NO_WAY("Block entered with different stack depths");
#endif
        }

        if (!addToPending)
        {
            return;
        }

        if (block->bbStkDepth > 0)
        {
            // We need to fix the types of any spill temps that might have changed:
            // int->native int, float->double, int->byref, etc.
            impRetypeEntryStateTemps(block);
        }

        // OK, we must add to the pending list, if it's not already in it.
        if (impGetPendingBlockMember(block) != 0)
        {
            return;
        }
    }

    // Get an entry to add to the pending list

    PendingDsc* dsc;

    if (impPendingFree)
    {
        // We can reuse one of the freed up dscs.
        dsc            = impPendingFree;
        impPendingFree = dsc->pdNext;
    }
    else
    {
        // We have to create a new dsc
        dsc = new (this, CMK_Unknown) PendingDsc;
    }

    dsc->pdBB                 = block;
    dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
    dsc->pdThisPtrInit        = verCurrentState.thisInitialized;

    // Save the stack trees for later

    if (verCurrentState.esStackDepth)
    {
        impSaveStackState(&dsc->pdSavedStack, false);
    }

    // Add the entry to the pending list

    dsc->pdNext    = impPendingList;
    impPendingList = dsc;
    impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.

    // Various assertions require us now to consider the block as not imported (at least for
    // the final time...)
    block->bbFlags &= ~BBF_IMPORTED;

#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
    }
#endif
}

/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.

void Compiler::impReimportBlockPending(BasicBlock* block)
{
    JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);

    assert(block->bbFlags & BBF_IMPORTED);

    // OK, we must add to the pending list, if it's not already in it.
    if (impGetPendingBlockMember(block) != 0)
    {
        return;
    }

    // Get an entry to add to the pending list

    PendingDsc* dsc;

    if (impPendingFree)
    {
        // We can reuse one of the freed up dscs.
        dsc            = impPendingFree;
        impPendingFree = dsc->pdNext;
    }
    else
    {
        // We have to create a new dsc
        dsc = new (this, CMK_ImpStack) PendingDsc;
    }

    dsc->pdBB = block;

    if (block->bbEntryState)
    {
        dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
        dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
        dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
    }
    else
    {
        dsc->pdThisPtrInit        = TIS_Bottom;
        dsc->pdSavedStack.ssDepth = 0;
        dsc->pdSavedStack.ssTrees = nullptr;
    }

    // Add the entry to the pending list

    dsc->pdNext    = impPendingList;
    impPendingList = dsc;
    impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.

    // Various assertions require us now to consider the block as not imported (at least for
    // the final time...)
block->bbFlags &= ~BBF_IMPORTED;

#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
    }
#endif
}

void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
    if (comp->impBlockListNodeFreeList == nullptr)
    {
        return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
    }
    else
    {
        BlockListNode* res             = comp->impBlockListNodeFreeList;
        comp->impBlockListNodeFreeList = res->m_next;
        return res;
    }
}

void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
    node->m_next             = impBlockListNodeFreeList;
    impBlockListNodeFreeList = node;
}

void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
    bool toDo = true;

    noway_assert(!fgComputePredsDone);
    if (!fgCheapPredsValid)
    {
        fgComputeCheapPreds();
    }

    BlockListNode* succCliqueToDo = nullptr;
    BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
    while (toDo)
    {
        toDo = false;
        // Look at the successors of every member of the predecessor to-do list.
        while (predCliqueToDo != nullptr)
        {
            BlockListNode* node = predCliqueToDo;
            predCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlock* const succ : blk->Succs())
            {
                // If it's not already in the clique, add it, and also add it
                // as a member of the successor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
                {
                    callback->Visit(SpillCliqueSucc, succ);
                    impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
                    succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
                    toDo           = true;
                }
            }
        }
        // Look at the predecessors of every member of the successor to-do list.
        while (succCliqueToDo != nullptr)
        {
            BlockListNode* node = succCliqueToDo;
            succCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
            {
                BasicBlock* predBlock = pred->block;
                // If it's not already in the clique, add it, and also add it
                // as a member of the predecessor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
                {
                    callback->Visit(SpillCliquePred, predBlock);
                    impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
                    predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
                    toDo           = true;
                }
            }
        }
    }

    // If this fails, it means we didn't walk the spill clique properly and somehow managed
    // to miss walking back to include the predecessor we started from.
    // The most likely cause: missing or out-of-date bbPreds.
    assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}

void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    if (predOrSucc == SpillCliqueSucc)
    {
        assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
        blk->bbStkTempsIn = m_baseTmp;
    }
    else
    {
        assert(predOrSucc == SpillCliquePred);
        assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
        blk->bbStkTempsOut = m_baseTmp;
    }
}

void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    // For Preds we could be a little smarter and just find the existing store
    // and re-type it/add a cast, but that is complicated and hopefully very rare, so
    // just re-import the whole block (just like we do for successors)

    if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
    {
        // If we haven't imported this block and we're not going to (because it isn't on
        // the pending list) then just ignore it for now.
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list, still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts, since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique. 
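    // For illustration, a hedged sketch of IL that can create such a clique
    // conflict on 64-bit targets (hypothetical, simplified):
    //
    //     blockA:  ldc.i4.0      ; pushes an "int"
    //              br   merge
    //     blockB:  ldarg.0
    //              conv.i        ; pushes a "native int"
    //              br   merge
    //     merge:   ...
    //
    // Whichever block is imported first fixes the spill temp's type; when the
    // other block disagrees, the entire clique is re-imported with the wider type.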
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler, steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects. 
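    // As a hedged sketch of the loop below: BBJ_NONE internal blocks are skipped
    // by falling through to bbNext, while under OSR a BBJ_ALWAYS internal block is
    // followed through its bbJumpDest; every block skipped this way carries no IL
    // and is simply marked BBF_IMPORTED.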
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType();

        if (corType == CORINFO_TYPE_CLASS)
        {
            sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
        }
        else if (corType == CORINFO_TYPE_VALUECLASS)
        {
            inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT);
        }
        else if (corType == CORINFO_TYPE_BYREF)
        {
            sigClass = info.compCompHnd->getArgClass(&sig, sigArg);
            corType  = info.compCompHnd->getChildType(sigClass, &sigClass);
        }

        if (argNode != nullptr)
        {
            bool                 isExact   = false;
            bool                 isNonNull = false;
            CORINFO_CLASS_HANDLE argCls    = gtGetClassHandle(argNode, &isExact, &isNonNull);
            if (argCls != nullptr)
            {
                const bool isArgValueType = eeIsValueClass(argCls);
                // Exact class of the arg is known
                if (isExact && !isArgValueType)
                {
                    inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS);
                    if ((argCls != sigClass) && (sigClass != nullptr))
                    {
                        // .. but the signature accepts a less concrete type.
                        inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT);
                    }
                }
                // Arg is a reference type in the signature and a boxed value type was passed.
                else if (isArgValueType && (corType == CORINFO_TYPE_CLASS))
                {
                    inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED);
                }
            }

            if (argNode->OperIsConst())
            {
                inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST);
            }
            argUse = argUse->GetNext();
        }
        sigArg = info.compCompHnd->getArgNext(sigArg);
    }

    // Note if the callee's return type is a value type
    if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS)
    {
        inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT);
    }

    // Note if the callee's class is a promotable struct
    if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
    {
        assert(structPromotionHelper != nullptr);
        if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
        {
            inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
        }
        inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE);
    }

#ifdef FEATURE_SIMD

    // Note if this method has SIMD args or a SIMD return value
    if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
    {
        inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
    }

#endif // FEATURE_SIMD

    // Roughly classify callsite frequency.
    InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;

    // If this is a prejit root, or a maximally hot block...
    if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight()))
    {
        frequency = InlineCallsiteFrequency::HOT;
    }
    // No training data. Look for loop-like things.
    // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
    // However, give it to things nearby.
    else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
             (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
    {
        frequency = InlineCallsiteFrequency::LOOP;
    }
    else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
    {
        frequency = InlineCallsiteFrequency::WARM;
    }
    // Now modify the multiplier based on where we're called from.
    else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
    {
        frequency = InlineCallsiteFrequency::RARE;
    }
    else
    {
        frequency = InlineCallsiteFrequency::BORING;
    }

    // Also capture the block weight of the call site.
    //
    // In the prejit root case, assume at runtime there might be a hot call site
    // for this method, so we won't prematurely conclude this method should never
    // be inlined.
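    // As a worked (hypothetical) example of the numbers recorded below: with a
    // call-site block weight of 50 and a root entry weight of 100, profileFreq is
    // reported as 50 / 100 = 0.5; the prejit root path instead substitutes a fixed
    // hot weight (1000000.0) so standalone compilation does not understate the site.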
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. 
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);

    if (methInfo->args.numArgs > MAX_INL_ARGS)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
        return;
    }

    // Note force inline state

    inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);

    // Note IL code size

    inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);

    if (inlineResult->IsFailure())
    {
        return;
    }

    // Make sure maxstack is not too big

    inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);

    if (inlineResult->IsFailure())
    {
        return;
    }
}

/*****************************************************************************
 */

void Compiler::impCheckCanInline(GenTreeCall*           call,
                                 CORINFO_METHOD_HANDLE  fncHandle,
                                 unsigned               methAttr,
                                 CORINFO_CONTEXT_HANDLE exactContextHnd,
                                 InlineCandidateInfo**  ppInlineCandidateInfo,
                                 InlineResult*          inlineResult)
{
    // Either EE or JIT might throw exceptions below.
    // If that happens, just don't inline the method.

    struct Param
    {
        Compiler*              pThis;
        GenTreeCall*           call;
        CORINFO_METHOD_HANDLE  fncHandle;
        unsigned               methAttr;
        CORINFO_CONTEXT_HANDLE exactContextHnd;
        InlineResult*          result;
        InlineCandidateInfo**  ppInlineCandidateInfo;
    } param;
    memset(&param, 0, sizeof(param));

    param.pThis                 = this;
    param.call                  = call;
    param.fncHandle             = fncHandle;
    param.methAttr              = methAttr;
    param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
    param.result                = inlineResult;
    param.ppInlineCandidateInfo = ppInlineCandidateInfo;

    bool success = eeRunWithErrorTrap<Param>(
        [](Param* pParam) {
            CorInfoInitClassResult initClassResult;

#ifdef DEBUG
            const char* methodName;
            const char* className;
            methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);

            if (JitConfig.JitNoInline())
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
                goto _exit;
            }
#endif

            /* Try to get the code address/size for the method */

            CORINFO_METHOD_INFO methInfo;
            if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
                goto _exit;
            }

            // Profile data allows us to avoid early "too many IL bytes" outs.
            pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE,
                                     pParam->pThis->fgHaveSufficientProfileData());

            bool forceInline;
            forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);

            pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);

            if (pParam->result->IsFailure())
            {
                assert(pParam->result->IsNever());
                goto _exit;
            }

            // Speculatively check if initClass() can be done.
            // If it can be done, we will try to inline the method.
            initClassResult =
                pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
                                                           pParam->exactContextHnd /* context */);

            if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
            {
                pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT);
                goto _exit;
            }

            // Give the EE the final say in whether to inline or not.
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow to assign a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. 
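// As a hedged example (hypothetical caller IL, illustrative only): for a call
//
//     ldloc.0
//     ldc.i4.5
//     call   int32 C::M(int32, int32)
//
// the first actual is recorded as argIsLclVar and the second as argIsInvariant,
// which later lets impInlineFetchArg substitute them directly instead of
// forcing a spill to a temp.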
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is
//    usually the tree node present at the call, but may also differ in
//    various ways:
//    - when the call arg is a GT_RET_EXPR, we search back through the ret
//      expr chain for the actual node. Note this will either be the original
//      call (which will be a failed inline by this point), or the return
//      expression from some set of inlines.
//    - when argument type casting is needed the necessary casts are added
//      around the argument node.
//    - if an argument can be simplified by folding then the node here is the
//      folded value.
//
//    The method may make observations that lead to marking this candidate as
//    a failed inline. If this happens the initialization is abandoned immediately
//    to try and reduce the jit time cost for a failed inline.

void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
{
    assert(!compIsForInlining());

    GenTreeCall*         call         = pInlineInfo->iciCall;
    CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
    unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
    InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
    InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
    InlineResult*        inlineResult = pInlineInfo->inlineResult;

    // Inlined methods always use the managed calling convention
    const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed);

    /* init the argument struct */

    memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));

    GenTreeCall::Use* thisArg = call->gtCallThisArg;
    unsigned          argCnt  = 0; // Count of the arguments

    assert((methInfo->args.hasThis()) == (thisArg != nullptr));

    if (thisArg != nullptr)
    {
        inlArgInfo[0].argIsThis = true;
        impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Record some information about each of the arguments */
    bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;

#if USER_ARGS_COME_LAST
    unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0;
#else  // USER_ARGS_COME_LAST
    unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST

    for (GenTreeCall::Use& use : call->Args())
    {
        if (hasRetBuffArg && (&use == call->gtCallArgs))
        {
            continue;
        }

        // Ignore the type context argument
        if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
        {
            pInlineInfo->typeContextArg = typeCtxtArg;
            typeCtxtArg                 = 0xFFFFFFFF;
            continue;
        }

        GenTree* actualArg = gtFoldExpr(use.GetNode());
        impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Make sure we got the arg number right */
    assert(argCnt == methInfo->args.totalILArgs());

#ifdef FEATURE_SIMD
    bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
#endif // FEATURE_SIMD

    /* We have typeless opcodes, get type information from the signature */

    if (thisArg != nullptr)
    {
        lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
        lclVarInfo[0].lclHasLdlocaOp = false;

#ifdef FEATURE_SIMD
        // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
        // the inlining multiplier) for anything in that assembly.
        // But we only need to normalize it if it is a TYP_STRUCT
        // (which we need to do even if we have already set foundSIMDType).
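        // For example (a hedged illustration, not tied to a specific test): when
        // the inlinee is an instance method on a SIMD struct such as
        // System.Numerics.Vector4, "this" arrives as a TYP_BYREF, yet the class
        // handle still identifies a SIMD class, so foundSIMDType is set here to
        // bump the inlining multiplier.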
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
        {
            foundSIMDType = true;
        }
#endif // FEATURE_SIMD

        var_types sigType         = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF;
        lclVarInfo[0].lclTypeInfo = sigType;

        GenTree* thisArgNode = thisArg->GetNode();

        assert(varTypeIsGC(thisArgNode->TypeGet()) ||     // "this" is managed
               ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesn't care
                (clsAttr & CORINFO_FLG_VALUECLASS)));

        if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType))
        {
            if (sigType == TYP_REF)
            {
                /* The argument cannot be bashed into a ref (see bug 750871) */
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
                return;
            }

            /* This can only happen with byrefs <-> ints/shorts */

            assert(sigType == TYP_BYREF);
            assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF));

            lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
        }
    }

    /* Init the types of the arguments and make sure the types
     * from the trees match the types in the signature */

    CORINFO_ARG_LIST_HANDLE argLst;
    argLst = methInfo->args.args;

    unsigned i;
    for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
    {
        var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);

        lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);

#ifdef FEATURE_SIMD
        if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
        {
            // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
            // found a SIMD type, even if this may not be a type we recognize (the assumption is that
            // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
            foundSIMDType = true;
            if (sigType == TYP_STRUCT)
            {
                var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
                sigType              = structType;
            }
        }
#endif // FEATURE_SIMD

        lclVarInfo[i].lclTypeInfo    = sigType;
        lclVarInfo[i].lclHasLdlocaOp = false;

        /* Does the tree type match the signature type? */

        GenTree* inlArgNode = inlArgInfo[i].argNode;

        if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE))
        {
            assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType));
            assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType));

            /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
               but in bad IL cases with caller-callee signature mismatches we can see other types.
               Intentionally reject cases with mismatches so the jit is more flexible when
               encountering bad IL. */

            bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
                                        (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
                                        (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));

            if (!isPlausibleTypeMatch)
            {
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
                return;
            }

            GenTree** pInlArgNode;
            if (inlArgNode->OperIs(GT_PUTARG_TYPE))
            {
                // There was a widening or narrowing cast.
                GenTreeUnOp* putArgType = inlArgNode->AsUnOp();
                pInlArgNode             = &putArgType->gtOp1;
                inlArgNode              = putArgType->gtOp1;
            }
            else
            {
                // The same size but different type of the arguments.
                pInlArgNode = &inlArgInfo[i].argNode;
            }

            /* Is it a narrowing or widening cast?
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
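            // (A clarifying note: lclIsPinned is only ever set for GC-typed locals
            // during impInlineInitVars; a "pinned" modifier on a non-GC local is
            // deliberately ignored there with a JITDUMP message, which is why this
            // assert holds.)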
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
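// A minimal illustration (not from the original comment): for an inlinee body
// like "return this.someField;", the load of the field off 'this' is the first
// observable effect in the first block, so the access itself faults on a null
// 'this' and no separate explicit null check has to be materialized.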
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
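// For instance (illustrative only): a callee with CORINFO_FLG_SYNCH fails below
// with InlineObservation::CALLEE_IS_SYNCHRONIZED; since that observation is
// inherent to the callee, the runtime may remember it, and later call sites can
// see CORINFO_FLG_DONT_INLINE without repeating this analysis.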
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
impInlineRoot()->m_inlineStrategy->NoteCandidate(); // Since we're not actually inlining yet, and this call site is // still just an inline candidate, there's nothing to report. inlineResult.SetReported(); } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by target-specific // instructions bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) { #if defined(TARGET_XARCH) switch (intrinsicName) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: return true; case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: return compOpportunisticallyDependsOn(InstructionSet_SSE41); case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_FMA); default: return false; } #elif defined(TARGET_ARM64) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: case NI_System_Math_Sqrt: case NI_System_Math_Max: case NI_System_Math_Min: return true; case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_AdvSimd); default: return false; } #elif defined(TARGET_ARM) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Round: case NI_System_Math_Sqrt: return true; default: return false; } #else // TODO: This portion of logic is not implemented for other arch. // The reason for returning true is that on all other arch the only intrinsic // enabled are target intrinsics. return true; #endif } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by calling System.Math // methods. bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName) { // Currently, if a math intrinsic is not implemented by target-specific // instructions, it will be implemented by a System.Math call. In the // future, if we turn to implementing some of them with helper calls, // this predicate needs to be revisited. 
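// Example (illustrative): on x64 without SSE4.1 support, NI_System_Math_Floor
// is not a target intrinsic (see IsTargetIntrinsic above), so this returns
// true and the operation stays a call to the System.Math.Floor method.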
return !IsTargetIntrinsic(intrinsicName); } bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName) { switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_FusedMultiplyAdd: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Max: case NI_System_Math_Min: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END)); return true; } default: { assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END)); return false; } } } bool Compiler::IsMathIntrinsic(GenTree* tree) { return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName); } //------------------------------------------------------------------------ // impDevirtualizeCall: Attempt to change a virtual vtable call into a // normal call // // Arguments: // call -- the call node to examine/modify // pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R. // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized. // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized. // pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized. // pExactContextHandle -- [OUT] updated context handle iff call devirtualized // isLateDevirtualization -- if devirtualization is happening after importation // isExplicitTailCalll -- [IN] true if we plan on using an explicit tail call // ilOffset -- IL offset of the call // // Notes: // Virtual calls in IL will always "invoke" the base class method. // // This transformation looks for evidence that the type of 'this' // in the call is exactly known, is a final class or would invoke // a final method, and if that and other safety checks pan out, // modifies the call and the call info to create a direct call. // // This transformation is initially done in the importer and not // in some subsequent optimization pass because we want it to be // upstream of inline candidate identification. // // However, later phases may supply improved type information that // can enable further devirtualization. We currently reinvoke this // code after inlining, if the return value of the inlined call is // the 'this obj' of a subsequent virtual call. // // If devirtualization succeeds and the call's this object is a // (boxed) value type, the jit will ask the EE for the unboxed entry // point. If this exists, the jit will invoke the unboxed entry // on the box payload. In addition if the boxing operation is // visible to the jit and the call is the only consmer of the box, // the jit will try analyze the box to see if the call can be instead // instead made on a local copy. If that is doable, the call is // updated to invoke the unboxed entry on the local copy and the // boxing operation is removed. 
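// For example (illustrative only): given the C# sequence "object o = someInt;
// o.ToString();" where the box cannot escape, the call can be rewritten to
// invoke the unboxed Int32.ToString on a local copy of someInt, and the box
// allocation disappears.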
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE)) { assert(!"mismatched method attributes"); } #endif // DEBUG } // In R2R mode, we might see virtual stub calls to // non-virtuals. For instance cases where the non-virtual method // is in a different assembly but is called via CALLVIRT. For // verison resilience we must allow for the fact that the method // might become virtual in some update. // // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a // regular call+nullcheck upstream, so we won't reach this // point. if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0) { assert(call->IsVirtualStub()); assert(opts.IsReadyToRun()); JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n"); return; } // Fetch information about the class that introduced the virtual method. CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod); const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass); // Is the call an interface call? const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0; // See what we know about the type of 'this' in the call. GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false); bool isExact = false; bool objIsNonNull = false; CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull); // Bail if we know nothing. if (objClass == NO_CLASS_HANDLE) { JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet())); // Don't try guarded devirtualiztion when we're doing late devirtualization. // if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown")); return; } // If the objClass is sealed (final), then we may be able to devirtualize. const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass); const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0; #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; const char* objClassNote = "[?]"; const char* objClassName = "?objClass"; const char* baseClassName = "?baseClass"; const char* baseMethodName = "?baseMethod"; if (verbose || doPrint) { objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : ""; objClassName = info.compCompHnd->getClassName(objClass); baseClassName = info.compCompHnd->getClassName(baseClass); baseMethodName = eeGetMethodName(baseMethod, nullptr); if (verbose) { printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n" " class for 'this' is %s%s (attrib %08x)\n" " base method is %s::%s\n", callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName); } } #endif // defined(DEBUG) // See if the jit's best type for `obj` is an interface. // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal // IL_021d: ldloc.0 // IL_021e: callvirt instance int32 System.Object::GetHashCode() // // If so, we can't devirtualize, but we may be able to do guarded devirtualization. // if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0) { // Don't try guarded devirtualiztion when we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
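// Illustrative example: for EqualityComparer<string>.Default the single class
// generic argument is String, which is sealed (CORINFO_FLG_FINAL), so the VM
// can report the exact comparer class and later devirtualization of
// Equals/GetHashCode on it becomes possible.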
CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(methodHnd, &sig); assert(sig.sigInst.classInstCount == 1); CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0]; assert(typeHnd != nullptr); // Lookup can incorrect when we have __Canon as it won't appear // to implement any interface types. // // And if we do not have a final type, devirt & inlining is // unlikely to result in much simplification. // // We can use CORINFO_FLG_FINAL to screen out both of these cases. const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd); const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0); if (isFinalType) { if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default) { result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd); } else { assert(ni == NI_System_Collections_Generic_Comparer_get_Default); result = info.compCompHnd->getDefaultComparerClass(typeHnd); } JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd), result != nullptr ? eeGetClassName(result) : "unknown"); } else { JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd)); } break; } default: { JITDUMP("This special intrinsic not handled, sorry...\n"); break; } } return result; } //------------------------------------------------------------------------ // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it. // // Arguments: // token - init value for the allocated token. // // Return Value: // pointer to token into jit-allocated memory. CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token) { CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1); *memory = token; return memory; } //------------------------------------------------------------------------ // SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables. // class SpillRetExprHelper { public: SpillRetExprHelper(Compiler* comp) : comp(comp) { } void StoreRetExprResultsInArgs(GenTreeCall* call) { for (GenTreeCall::Use& use : call->Args()) { comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this); } if (call->gtCallThisArg != nullptr) { comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this); } } private: static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { assert((pTree != nullptr) && (*pTree != nullptr)); GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { // Trees with ret_expr are marked as GTF_CALL. 
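// A subtree without GTF_CALL set therefore cannot contain a GT_RET_EXPR
// anywhere below it, so it is safe to skip it entirely.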
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
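// Shape of the eventual guarded expansion (an illustrative sketch; the
// expansion itself is materialized later, not by this method):
//
//   if (obj->methodTable == guardedClassHandle) { direct call (may inline) }
//   else                                        { original virtual call    }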
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
1
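The covariant store check at the end of the preceding record's file content reduces to a few fast paths. The standalone C++ sketch below restates them; TypeHandle, ArrayInfo, and ValueInfo are invented stand-ins for JIT-internal state, not actual runtime types.

// Minimal sketch of the covariant-store-check fast paths, under the
// assumption that array/value type facts have already been gathered.
#include <cstdint>

struct TypeHandle
{
    uintptr_t value;
    bool operator==(const TypeHandle& other) const { return value == other.value; }
};

struct ArrayInfo
{
    bool       typeIsExact;         // the array's dynamic type is known exactly
    TypeHandle elementType;         // static element type of the array
    bool       elementTypeIsSealed; // no subtype of the element type can exist
};

struct ValueInfo
{
    bool       isNull;      // the stored value is a null constant
    bool       typeIsKnown; // we have a class handle for the value
    TypeHandle staticType;  // that class handle, if known
};

// Returns true when storing 'value' into 'array' provably cannot violate
// array covariance, so the runtime type check may be elided.
bool canSkipCovariantStoreCheck(const ValueInfo& value, const ArrayInfo& array, const TypeHandle& objectType)
{
    if (value.isNull)
        return true; // null is storable into any reference array
    if (array.typeIsExact && array.elementType == objectType)
        return true; // exactly object[]: every reference is storable
    if (array.elementTypeIsSealed && value.typeIsKnown && value.staticType == array.elementType)
        return true; // sealed element type and the value's type matches it exactly
    return false;    // otherwise keep the runtime check
}

The real implementation additionally recognizes stores back into the same non-address-exposed array, which requires tree-shape inspection that this sketch omits.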
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
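The three conditions above are the eligibility filters this change adds. As a rough illustration (the actual checks live in the JIT's call lowering code, and the ArgInfo/CallSite types below are invented for this sketch), they amount to a conservative predicate:

#include <vector>

// Hypothetical summary of one outgoing argument; not a JIT data structure.
struct ArgInfo
{
    bool isSplit;           // passed partly in registers, partly on the stack (ARM32 structs)
    int  outgoingStackSlot; // slot the callee expects the arg in, -1 if register-only
    int  incomingStackSlot; // slot the value currently occupies in the caller's frame, -1 if none
};

struct CallSite
{
    std::vector<ArgInfo> args;
    bool usesNonStandardCallingConvention; // e.g. extra hidden arguments
};

// Conservatively decide whether a fast tail call is safe: any of the three
// conditions from the change description forces the slower dispatch path.
bool canUseFastTailCall(const CallSite& call)
{
    if (call.usesNonStandardCallingConvention)
        return false; // hidden args may not survive reusing the caller's frame

    for (const ArgInfo& arg : call.args)
    {
        if (arg.isSplit)
            return false; // split struct args are not supported for fast tail calls

        if (arg.outgoingStackSlot < 0)
            continue; // register-only args never overwrite the incoming arg area

        // Writing this outgoing arg would clobber a caller stack slot that some
        // other outgoing arg still needs to read.
        for (const ArgInfo& other : call.args)
        {
            if (&other != &arg && other.incomingStackSlot == arg.outgoingStackSlot)
                return false;
        }
    }
    return true;
}

Any single failing condition falls back to the regular helper-assisted tail call path, so the predicate errs on the side of returning false.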
./src/coreclr/jit/lclvars.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" #include "register_arg_convention.h" #include "jitstd/algorithm.h" #include "patchpointinfo.h" /*****************************************************************************/ #ifdef DEBUG #if DOUBLE_ALIGN /* static */ unsigned Compiler::s_lvaDoubleAlignedProcsCount = 0; #endif #endif /*****************************************************************************/ void Compiler::lvaInit() { /* We haven't allocated stack variables yet */ lvaRefCountState = RCS_INVALID; lvaGenericsContextInUse = false; lvaTrackedToVarNumSize = 0; lvaTrackedToVarNum = nullptr; lvaTrackedFixed = false; // false: We can still add new tracked variables lvaDoneFrameLayout = NO_FRAME_LAYOUT; #if !defined(FEATURE_EH_FUNCLETS) lvaShadowSPslotsVar = BAD_VAR_NUM; #endif // !FEATURE_EH_FUNCLETS lvaInlinedPInvokeFrameVar = BAD_VAR_NUM; lvaReversePInvokeFrameVar = BAD_VAR_NUM; #if FEATURE_FIXED_OUT_ARGS lvaPInvokeFrameRegSaveVar = BAD_VAR_NUM; lvaOutgoingArgSpaceVar = BAD_VAR_NUM; lvaOutgoingArgSpaceSize = PhasedVar<unsigned>(); #endif // FEATURE_FIXED_OUT_ARGS #ifdef JIT32_GCENCODER lvaLocAllocSPvar = BAD_VAR_NUM; #endif // JIT32_GCENCODER lvaNewObjArrayArgs = BAD_VAR_NUM; lvaGSSecurityCookie = BAD_VAR_NUM; #ifdef TARGET_X86 lvaVarargsBaseOfStkArgs = BAD_VAR_NUM; #endif // TARGET_X86 lvaVarargsHandleArg = BAD_VAR_NUM; lvaStubArgumentVar = BAD_VAR_NUM; lvaArg0Var = BAD_VAR_NUM; lvaMonAcquired = BAD_VAR_NUM; lvaRetAddrVar = BAD_VAR_NUM; lvaInlineeReturnSpillTemp = BAD_VAR_NUM; gsShadowVarInfo = nullptr; #if defined(FEATURE_EH_FUNCLETS) lvaPSPSym = BAD_VAR_NUM; #endif #if FEATURE_SIMD lvaSIMDInitTempVarNum = BAD_VAR_NUM; #endif // FEATURE_SIMD lvaCurEpoch = 0; structPromotionHelper = new (this, CMK_Generic) StructPromotionHelper(this); } /*****************************************************************************/ void Compiler::lvaInitTypeRef() { /* x86 args look something like this: [this ptr] [hidden return buffer] [declared arguments]* [generic context] [var arg cookie] x64 is closer to the native ABI: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* (Note: prior to .NET Framework 4.5.1 for Windows 8.1 (but not .NET Framework 4.5.1 "downlevel"), the "hidden return buffer" came before the "this ptr". Now, the "this ptr" comes first. This is different from the C++ order, where the "hidden return buffer" always comes first.) 
ARM and ARM64 are the same as the current x64 convention: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* Key difference: The var arg cookie and generic context are swapped with respect to the user arguments */ /* Set compArgsCount and compLocalsCount */ info.compArgsCount = info.compMethodInfo->args.numArgs; // Is there a 'this' pointer if (!info.compIsStatic) { info.compArgsCount++; } else { info.compThisArg = BAD_VAR_NUM; } info.compILargsCount = info.compArgsCount; #ifdef FEATURE_SIMD if (supportSIMDTypes() && (info.compRetNativeType == TYP_STRUCT)) { var_types structType = impNormStructType(info.compMethodInfo->args.retTypeClass); info.compRetType = structType; } #endif // FEATURE_SIMD // Are we returning a struct using a return buffer argument? // const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // Possibly change the compRetNativeType from TYP_STRUCT to a "primitive" type // when we are returning a struct by value and it fits in one register // if (!hasRetBuffArg && varTypeIsStruct(info.compRetNativeType)) { CORINFO_CLASS_HANDLE retClsHnd = info.compMethodInfo->args.retTypeClass; Compiler::structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, info.compCallConv, &howToReturnStruct); // We can safely widen the return type for enclosed structs. if ((howToReturnStruct == SPK_PrimitiveType) || (howToReturnStruct == SPK_EnclosingType)) { assert(returnType != TYP_UNKNOWN); assert(returnType != TYP_STRUCT); info.compRetNativeType = returnType; // ToDo: Refactor this common code sequence into its own method as it is used 4+ times if ((returnType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } } // Do we have a RetBuffArg? if (hasRetBuffArg) { info.compArgsCount++; } else { info.compRetBuffArg = BAD_VAR_NUM; } /* There is a 'hidden' cookie pushed last when the calling convention is varargs */ if (info.compIsVarArgs) { info.compArgsCount++; } // Is there an extra parameter used to pass instantiation info to // shared generic methods and shared generic struct instance methods? if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compArgsCount++; } else { info.compTypeCtxtArg = BAD_VAR_NUM; } lvaCount = info.compLocalsCount = info.compArgsCount + info.compMethodInfo->locals.numArgs; info.compILlocalsCount = info.compILargsCount + info.compMethodInfo->locals.numArgs; /* Now allocate the variable descriptor table */ if (compIsForInlining()) { lvaTable = impInlineInfo->InlinerCompiler->lvaTable; lvaCount = impInlineInfo->InlinerCompiler->lvaCount; lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; // No more stuff needs to be done. return; } lvaTableCnt = lvaCount * 2; if (lvaTableCnt < 16) { lvaTableCnt = 16; } lvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(lvaTableCnt); size_t tableSize = lvaTableCnt * sizeof(*lvaTable); memset(lvaTable, 0, tableSize); for (unsigned i = 0; i < lvaTableCnt; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. 
} //------------------------------------------------------------------------- // Count the arguments and initialize the respective lvaTable[] entries // // First the implicit arguments //------------------------------------------------------------------------- InitVarDscInfo varDscInfo; #ifdef TARGET_X86 // x86 unmanaged calling conventions limit the number of registers supported // for accepting arguments. As a result, we need to modify the number of registers // when we emit a method with an unmanaged calling convention. switch (info.compCallConv) { case CorInfoCallConvExtension::Thiscall: // In thiscall the this parameter goes into a register. varDscInfo.Init(lvaTable, hasRetBuffArg, 1, 0); break; case CorInfoCallConvExtension::C: case CorInfoCallConvExtension::Stdcall: case CorInfoCallConvExtension::CMemberFunction: case CorInfoCallConvExtension::StdcallMemberFunction: varDscInfo.Init(lvaTable, hasRetBuffArg, 0, 0); break; case CorInfoCallConvExtension::Managed: case CorInfoCallConvExtension::Fastcall: case CorInfoCallConvExtension::FastcallMemberFunction: default: varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); break; } #else varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); #endif lvaInitArgs(&varDscInfo); //------------------------------------------------------------------------- // Finally the local variables //------------------------------------------------------------------------- unsigned varNum = varDscInfo.varNum; LclVarDsc* varDsc = varDscInfo.varDsc; CORINFO_ARG_LIST_HANDLE localsSig = info.compMethodInfo->locals.args; for (unsigned i = 0; i < info.compMethodInfo->locals.numArgs; i++, varNum++, varDsc++, localsSig = info.compCompHnd->getArgNext(localsSig)) { CORINFO_CLASS_HANDLE typeHnd; CorInfoTypeWithMod corInfoTypeWithMod = info.compCompHnd->getArgType(&info.compMethodInfo->locals, localsSig, &typeHnd); CorInfoType corInfoType = strip(corInfoTypeWithMod); lvaInitVarDsc(varDsc, varNum, corInfoType, typeHnd, localsSig, &info.compMethodInfo->locals); if ((corInfoTypeWithMod & CORINFO_TYPE_MOD_PINNED) != 0) { if ((corInfoType == CORINFO_TYPE_CLASS) || (corInfoType == CORINFO_TYPE_BYREF)) { JITDUMP("Setting lvPinned for V%02u\n", varNum); varDsc->lvPinned = 1; } else { JITDUMP("Ignoring pin for non-GC type V%02u\n", varNum); } } varDsc->lvOnFrame = true; // The final home for this local variable might be our local stack frame if (corInfoType == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->locals, localsSig); lvaSetClass(varNum, clsHnd); } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varNum); varDsc->lvHasLdAddrOp = 1; // todo: Why does it apply only to non-structs? // if (!varTypeIsStruct(varDsc) && !varTypeIsSIMD(varDsc)) { lvaSetVarAddrExposed(varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } } if ( // If there already exist unsafe buffers, don't mark more structs as unsafe // as that will cause them to be placed along with the real unsafe buffers, // unnecessarily exposing them to overruns. This can affect GS tests which // intentionally do buffer-overruns. 
!getNeedsGSSecurityCookie() && // GS checks require the stack to be re-ordered, which can't be done with EnC !opts.compDbgEnC && compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25)) { setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; for (unsigned i = 0; i < lvaCount; i++) { if ((lvaTable[i].lvType == TYP_STRUCT) && compStressCompile(STRESS_GENERIC_VARN, 60)) { lvaTable[i].lvIsUnsafeBuffer = true; } } } if (getNeedsGSSecurityCookie()) { // Ensure that there will be at least one stack variable since // we require that the GSCookie does not have a 0 stack offset. unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy")); LclVarDsc* gsCookieDummy = lvaGetDesc(dummy); gsCookieDummy->lvType = TYP_INT; gsCookieDummy->lvIsTemp = true; // It is not alive at all, set the flag to prevent zero-init. lvaSetVarDoNotEnregister(dummy DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } // Allocate the lvaOutgoingArgSpaceVar now because we can run into problems in the // emitter when the varNum is greater that 32767 (see emitLclVarAddr::initLclVarAddr) lvaAllocOutgoingArgSpaceVar(); #ifdef DEBUG if (verbose) { lvaTableDump(INITIAL_FRAME_LAYOUT); } #endif } /*****************************************************************************/ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) { compArgSize = 0; #if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) // Prespill all argument regs on to stack in case of Arm when under profiler. if (compIsProfilerHookNeeded()) { codeGen->regSet.rsMaskPreSpillRegArg |= RBM_ARG_REGS; } #endif //---------------------------------------------------------------------- /* Is there a "this" pointer ? */ lvaInitThisPtr(varDscInfo); unsigned numUserArgsToSkip = 0; unsigned numUserArgs = info.compMethodInfo->args.numArgs; #if !defined(TARGET_ARM) if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { // If we are a native instance method, handle the first user arg // (the unmanaged this parameter) and then handle the hidden // return buffer parameter. 
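// For these conventions the unmanaged this pointer stays the first argument even when a return buffer // exists, and the return buffer is then passed like an ordinary pointer argument rather than in the fixed // return-buffer register (hence the 'false' passed to lvaInitRetBuffArg below).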
assert(numUserArgs >= 1); lvaInitUserArgs(varDscInfo, 0, 1); numUserArgsToSkip++; numUserArgs--; lvaInitRetBuffArg(varDscInfo, false); } else #endif { /* If we have a hidden return-buffer parameter, that comes here */ lvaInitRetBuffArg(varDscInfo, true); } //====================================================================== #if USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //------------------------------------------------------------------------- // Now walk the function signature for the explicit user arguments //------------------------------------------------------------------------- lvaInitUserArgs(varDscInfo, numUserArgsToSkip, numUserArgs); #if !USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //---------------------------------------------------------------------- // We have set info.compArgsCount in compCompile() noway_assert(varDscInfo->varNum == info.compArgsCount); assert(varDscInfo->intRegArgNum <= MAX_REG_ARG); codeGen->intRegState.rsCalleeRegArgCount = varDscInfo->intRegArgNum; codeGen->floatRegState.rsCalleeRegArgCount = varDscInfo->floatRegArgNum; #if FEATURE_FASTTAILCALL // Save the stack usage information // We can get register usage information using codeGen->intRegState and // codeGen->floatRegState info.compArgStackSize = varDscInfo->stackArgSize; #endif // FEATURE_FASTTAILCALL // The total argument size must be aligned. noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0); #ifdef TARGET_X86 /* We can not pass more than 2^16 dwords as arguments as the "ret" instruction can only pop 2^16 arguments. 
Could be handled correctly but it will be very difficult for fully interruptible code */ if (compArgSize != (size_t)(unsigned short)compArgSize) IMPL_LIMITATION("Too many arguments for the \"ret\" instruction to pop"); #endif } /*****************************************************************************/ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo) { LclVarDsc* varDsc = varDscInfo->varDsc; if (!info.compIsStatic) { varDsc->lvIsParam = 1; varDsc->lvIsPtr = 1; lvaArg0Var = info.compThisArg = varDscInfo->varNum; noway_assert(info.compThisArg == 0); if (eeIsValueClass(info.compClassHnd)) { varDsc->lvType = TYP_BYREF; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; var_types type = impNormStructType(info.compClassHnd, &simdBaseJitType); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(type)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); varDsc->lvExactSize = genTypeSize(type); } } #endif // FEATURE_SIMD } else { varDsc->lvType = TYP_REF; lvaSetClass(varDscInfo->varNum, info.compClassHnd); } varDsc->lvVerTypeInfo = typeInfo(); // Mark the 'this' pointer for the method varDsc->lvVerTypeInfo.SetIsThisPtr(); varDsc->lvIsRegArg = 1; noway_assert(varDscInfo->intRegArgNum == 0); varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->allocRegArg(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef DEBUG if (verbose) { printf("'this' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } /*****************************************************************************/ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg) { LclVarDsc* varDsc = varDscInfo->varDsc; bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // These two should always match noway_assert(hasRetBuffArg == varDscInfo->hasRetBufArg); if (hasRetBuffArg) { info.compRetBuffArg = varDscInfo->varNum; varDsc->lvType = TYP_BYREF; varDsc->lvIsParam = 1; varDsc->lvIsRegArg = 0; if (useFixedRetBufReg && hasFixedRetBuffReg()) { varDsc->lvIsRegArg = 1; varDsc->SetArgReg(theFixedRetBuffReg()); } else if (varDscInfo->canEnreg(TYP_INT)) { varDsc->lvIsRegArg = 1; unsigned retBuffArgNum = varDscInfo->allocRegArg(TYP_INT); varDsc->SetArgReg(genMapIntRegArgNumToRegNum(retBuffArgNum)); } #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef FEATURE_SIMD if (supportSIMDTypes() && varTypeIsSIMD(info.compRetType)) { varDsc->lvSIMDType = true; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(info.compMethodInfo->args.retTypeClass, &varDsc->lvExactSize); varDsc->SetSimdBaseJitType(simdBaseJitType); assert(varDsc->GetSimdBaseType() != TYP_UNKNOWN); } #endif // FEATURE_SIMD assert(!varDsc->lvIsRegArg || isValidIntArgReg(varDsc->GetArgReg())); #ifdef DEBUG if (varDsc->lvIsRegArg && verbose) { printf("'__retBuf' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif /* Update the total argument size, count and varDsc */ compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } //----------------------------------------------------------------------------- // lvaInitUserArgs: // 
Initialize local var descriptions for incoming user arguments // // Arguments: // varDscInfo - the local var descriptions // skipArgs - the number of user args to skip processing. // takeArgs - the number of user args to process (after skipping skipArgs number of args) // void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs) { //------------------------------------------------------------------------- // Walk the function signature for the explicit arguments //------------------------------------------------------------------------- #if defined(TARGET_X86) // Only (some of) the implicit args are enregistered for varargs if (info.compIsVarArgs) { varDscInfo->maxIntRegArgNum = varDscInfo->intRegArgNum; } #elif defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On System V type environment the float registers are not indexed together with the int ones. varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum; #endif // TARGET* CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; const unsigned argSigLen = info.compMethodInfo->args.numArgs; // We will process at most takeArgs arguments from the signature after skipping skipArgs arguments const int64_t numUserArgs = min(takeArgs, (argSigLen - (int64_t)skipArgs)); // If there are no user args or fewer than skipArgs args, return here since there's no work to do. if (numUserArgs <= 0) { return; } #ifdef TARGET_ARM regMaskTP doubleAlignMask = RBM_NONE; #endif // TARGET_ARM // Skip skipArgs arguments from the signature. for (unsigned i = 0; i < skipArgs; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } // Process each user arg. for (unsigned i = 0; i < numUserArgs; i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst)) { LclVarDsc* varDsc = varDscInfo->varDsc; CORINFO_CLASS_HANDLE typeHnd = nullptr; CorInfoTypeWithMod corInfoType = info.compCompHnd->getArgType(&info.compMethodInfo->args, argLst, &typeHnd); varDsc->lvIsParam = 1; lvaInitVarDsc(varDsc, varDscInfo->varNum, strip(corInfoType), typeHnd, argLst, &info.compMethodInfo->args); if (strip(corInfoType) == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->args, argLst); lvaSetClass(varDscInfo->varNum, clsHnd); } // For ARM, ARM64, and AMD64 varargs, all arguments go in integer registers var_types argType = mangleVarArgsType(varDsc->TypeGet()); var_types origArgType = argType; // The ARM softfp calling convention should affect only the floating point arguments. // Otherwise there appear too many surplus pre-spills and other memory operations // with the associated locations. bool isSoftFPPreSpill = opts.compUseSoftFP && varTypeIsFloating(varDsc->TypeGet()); unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); unsigned cSlots = (argSize + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE; // the total number of slots of this argument bool isHfaArg = false; var_types hfaType = TYP_UNDEF; // Methods that use VarArg or SoftFP cannot have HFA arguments, except that // native varargs on arm64 unix use the regular calling convention.
if (((TargetOS::IsUnix && TargetArchitecture::IsArm64) || !info.compIsVarArgs) && !opts.compUseSoftFP) { // If the argType is a struct, then check if it is an HFA if (varTypeIsStruct(argType)) { // hfaType is set to float, double, or SIMD type if it is an HFA, otherwise TYP_UNDEF hfaType = GetHfaType(typeHnd); isHfaArg = varTypeIsValidHfaType(hfaType); } } else if (info.compIsVarArgs) { // Currently native varargs are not implemented on non-Windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. if (TargetOS::IsUnix) { NYI("InitUserArgs for Vararg callee is not yet implemented on non Windows targets."); } } if (isHfaArg) { // We have an HFA argument, so from here on out treat the type as a float, double, or vector. // The original struct type is available by using origArgType. // We also update the cSlots to be the number of float/double/vector fields in the HFA. argType = hfaType; // TODO-Cleanup: remove this assignment and mark `argType` as const. varDsc->SetHfaType(hfaType); cSlots = varDsc->lvHfaSlots(); } // The number of slots that must be enregistered if we are to consider this argument enregistered. // This is normally the same as cSlots, since we normally either enregister the entire object, // or none of it. For structs on ARM, however, we only need to enregister a single slot to consider // it enregistered, as long as we can split the rest onto the stack. unsigned cSlotsToEnregister = cSlots; #if defined(TARGET_ARM64) if (compFeatureArgSplit()) { // On arm64 Windows we will need to properly handle the case where a >8byte <=16byte // struct is split between register r7 and virtual stack slot s[0] // We will only do this for calls to vararg methods on Windows Arm64 // // !!This does not affect the normal arm64 calling convention or Unix Arm64!! if (this->info.compIsVarArgs && argType == TYP_STRUCT) { if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register !varDscInfo->canEnreg(TYP_INT, cSlots)) // The end of the struct can't fit in a register { cSlotsToEnregister = 1; // Force the split } } } #endif // defined(TARGET_ARM64) #ifdef TARGET_ARM // On ARM we pass the first 4 words of integer arguments and non-HFA structs in registers. // But we pre-spill user arguments in varargs methods and structs. // unsigned cAlign; bool preSpill = info.compIsVarArgs || isSoftFPPreSpill; switch (origArgType) { case TYP_STRUCT: assert(varDsc->lvSize() == argSize); cAlign = varDsc->lvStructDoubleAlign ? 2 : 1; // HFA arguments go on the stack frame. They don't get spilled in the prolog like struct // arguments passed in the integer registers but get homed immediately after the prolog. if (!isHfaArg) { // TODO-Arm32-Windows: vararg struct should be forced to split like // ARM64 above. cSlotsToEnregister = 1; // HFAs must be totally enregistered or not, but other structs can be split. preSpill = true; } break; case TYP_DOUBLE: case TYP_LONG: cAlign = 2; break; default: cAlign = 1; break; } if (isRegParamType(argType)) { compArgSize += varDscInfo->alignReg(argType, cAlign) * REGSIZE_BYTES; } if (argType == TYP_STRUCT) { // Are we going to split the struct between registers and stack? We can do that as long as // no floating-point arguments have been put on the stack.
// // From the ARM Procedure Call Standard: // Rule C.5: "If the NCRN is less than r4 **and** the NSAA is equal to the SP," // then split the argument between registers and stack. Implication: if something // has already been spilled to the stack, then anything that would normally be // split between the core registers and the stack will be put on the stack. // Anything that follows will also be on the stack. However, if something from // floating point regs has been spilled to the stack, we can still use r0-r3 until they are full. if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register !varDscInfo->canEnreg(TYP_INT, cSlots) && // The end of the struct can't fit in a register varDscInfo->existAnyFloatStackArgs()) // There's at least one stack-based FP arg already { varDscInfo->setAllRegArgUsed(TYP_INT); // Prevent all future use of integer registers preSpill = false; // This struct won't be prespilled, since it will go on the stack } } if (preSpill) { for (unsigned ix = 0; ix < cSlots; ix++) { if (!varDscInfo->canEnreg(TYP_INT, ix + 1)) { break; } regMaskTP regMask = genMapArgNumToRegMask(varDscInfo->regArgNum(TYP_INT) + ix, TYP_INT); if (cAlign == 2) { doubleAlignMask |= regMask; } codeGen->regSet.rsMaskPreSpillRegArg |= regMask; } } #else // !TARGET_ARM #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; if (varTypeIsStruct(argType)) { assert(typeHnd != nullptr); eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); if (structDesc.passedInRegisters) { unsigned intRegCount = 0; unsigned floatRegCount = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { intRegCount++; } else if (structDesc.IsSseSlot(i)) { floatRegCount++; } else { assert(false && "Invalid eightbyte classification type."); break; } } if (intRegCount != 0 && !varDscInfo->canEnreg(TYP_INT, intRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } if (floatRegCount != 0 && !varDscInfo->canEnreg(TYP_FLOAT, floatRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } } } #endif // UNIX_AMD64_ABI #endif // !TARGET_ARM // The final home for this incoming register might be our local stack frame. // For System V platforms the final home will always be on the local stack frame. varDsc->lvOnFrame = true; bool canPassArgInRegisters = false; #if defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { canPassArgInRegisters = structDesc.passedInRegisters; } else #elif defined(TARGET_X86) if (varTypeIsStruct(argType) && isTrivialPointerSizedStruct(typeHnd)) { canPassArgInRegisters = varDscInfo->canEnreg(TYP_I_IMPL, cSlotsToEnregister); } else #endif // defined(UNIX_AMD64_ABI) { canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister); } if (canPassArgInRegisters) { /* Another register argument */ // Allocate the registers we need. allocRegArg() returns the first argument register number of the set. // For non-HFA structs, we still "try" to enregister the whole thing; it will just max out if splitting // to the stack happens. 
unsigned firstAllocatedRegArgNum = 0; #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS #if defined(UNIX_AMD64_ABI) unsigned secondAllocatedRegArgNum = 0; var_types firstEightByteType = TYP_UNDEF; var_types secondEightByteType = TYP_UNDEF; if (varTypeIsStruct(argType)) { if (structDesc.eightByteCount >= 1) { firstEightByteType = GetEightByteType(structDesc, 0); firstAllocatedRegArgNum = varDscInfo->allocRegArg(firstEightByteType, 1); } } else #endif // defined(UNIX_AMD64_ABI) { firstAllocatedRegArgNum = varDscInfo->allocRegArg(argType, cSlots); } if (isHfaArg) { // We need to save the fact that this HFA is enregistered // Note that we can have HVAs of SIMD types even if we are not recognizing intrinsics. // In that case, we won't have normalized the vector types on the varDsc, so if we have a single vector // register, we need to set the type now. Otherwise, later we'll assume this is passed by reference. if (varDsc->lvHfaSlots() != 1) { varDsc->lvIsMultiRegArg = true; } } varDsc->lvIsRegArg = 1; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 if (argType == TYP_STRUCT) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); if (cSlots == 2) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_I_IMPL)); varDsc->lvIsMultiRegArg = true; } } #elif defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType)); // If there is a second eightbyte, get a register for it too and map the arg to the reg number. if (structDesc.eightByteCount >= 2) { secondEightByteType = GetEightByteType(structDesc, 1); secondAllocatedRegArgNum = varDscInfo->allocRegArg(secondEightByteType, 1); varDsc->lvIsMultiRegArg = true; } if (secondEightByteType != TYP_UNDEF) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType)); } } #else // ARM32 if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); } #endif // ARM32 else #endif // FEATURE_MULTIREG_ARGS { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, argType)); } #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_LONG) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_INT)); } #if FEATURE_FASTTAILCALL // Check if arg was split between registers and stack. 
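// On ARM a struct argument can be split: its first slots occupy r0-r3 and the remainder spills to the // caller-supplied stack. A negative stack offset is recorded for the enregistered portion so that only the // slots that actually live on the stack are added to stackArgSize.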
if (!varTypeUsesFloatReg(argType)) { unsigned firstRegArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg()); unsigned lastRegArgNum = firstRegArgNum + cSlots - 1; if (lastRegArgNum >= varDscInfo->maxIntRegArgNum) { assert(varDscInfo->stackArgSize == 0); unsigned numEnregistered = varDscInfo->maxIntRegArgNum - firstRegArgNum; varDsc->SetStackOffset(-(int)numEnregistered * REGSIZE_BYTES); varDscInfo->stackArgSize += (cSlots - numEnregistered) * REGSIZE_BYTES; JITDUMP("set user arg V%02u offset to %d\n", varDscInfo->varNum, varDsc->GetStackOffset()); } } #endif #endif // TARGET_ARM #ifdef DEBUG if (verbose) { printf("Arg #%u passed in register(s) ", varDscInfo->varNum); #if defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { // Print both registers, just to be clear if (firstEightByteType == TYP_UNDEF) { printf("firstEightByte: <not used>"); } else { printf("firstEightByte: %s", getRegName(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType))); } if (secondEightByteType == TYP_UNDEF) { printf(", secondEightByte: <not used>"); } else { printf(", secondEightByte: %s", getRegName(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType))); } } else #endif // defined(UNIX_AMD64_ABI) { bool isFloat = varTypeUsesFloatReg(argType); unsigned regArgNum = genMapRegNumToRegArgNum(varDsc->GetArgReg(), argType); for (unsigned ix = 0; ix < cSlots; ix++, regArgNum++) { if (ix > 0) { printf(","); } if (!isFloat && (regArgNum >= varDscInfo->maxIntRegArgNum)) // a struct has been split between // registers and stack { printf(" stack slots:%d", cSlots - ix); break; } #ifdef TARGET_ARM if (isFloat) { // Print register size prefix if (argType == TYP_DOUBLE) { // Print both registers, just to be clear printf("%s/%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType)), getRegName(genMapRegArgNumToRegNum(regArgNum + 1, argType))); // doubles take 2 slots assert(ix + 1 < cSlots); ++ix; ++regArgNum; } else { printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType))); } } else #endif // TARGET_ARM { printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType))); } } } printf("\n"); } #endif // DEBUG } // end if (canPassArgInRegisters) else { #if defined(TARGET_ARM) varDscInfo->setAllRegArgUsed(argType); if (varTypeUsesFloatReg(argType)) { varDscInfo->setAnyFloatStackArgs(); } #elif defined(TARGET_ARM64) // If we needed to use the stack in order to pass this argument then // record the fact that we have used up any remaining registers of this 'type' // This prevents any 'backfilling' from occurring on ARM64 // varDscInfo->setAllRegArgUsed(argType); #endif // TARGET_XXX #if FEATURE_FASTTAILCALL #ifdef TARGET_ARM unsigned argAlignment = cAlign * TARGET_POINTER_SIZE; #else unsigned argAlignment = eeGetArgSizeAlignment(origArgType, (hfaType == TYP_FLOAT)); // We expect the following rounding operation to be a noop on all // ABIs except ARM (where we have 8-byte aligned args) and macOS // ARM64 (that allows packing multiple smaller parameters in a // single stack slot). assert(compMacOsArm64Abi() || ((varDscInfo->stackArgSize % argAlignment) == 0)); #endif varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment); JITDUMP("set user arg V%02u offset to %u\n", varDscInfo->varNum, varDscInfo->stackArgSize); varDsc->SetStackOffset(varDscInfo->stackArgSize); varDscInfo->stackArgSize += argSize; #endif // FEATURE_FASTTAILCALL } #ifdef UNIX_AMD64_ABI // The arg size is returning the number of bytes of the argument.
For a struct it could return a size not a // multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE, // so round it up. compArgSize += roundUp(argSize, TARGET_POINTER_SIZE); #else // !UNIX_AMD64_ABI compArgSize += argSize; #endif // !UNIX_AMD64_ABI if (info.compIsVarArgs || isSoftFPPreSpill) { #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); #else // !TARGET_X86 // TODO-CQ: We shouldn't have to go as far as to declare these // address-exposed -- DoNotEnregister should suffice. lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varDscInfo->varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varDscInfo->varNum); varDsc->lvHasLdAddrOp = 1; lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } compArgSize = GetOutgoingArgByteSize(compArgSize); #ifdef TARGET_ARM if (doubleAlignMask != RBM_NONE) { assert(RBM_ARG_REGS == 0xF); assert((doubleAlignMask & RBM_ARG_REGS) == doubleAlignMask); if (doubleAlignMask != RBM_NONE && doubleAlignMask != RBM_ARG_REGS) { // 'double aligned types' can begin only at r0 or r2 and we always expect at least two registers to be used // Note that in rare cases, we can have double-aligned structs of 12 bytes (if specified explicitly with // attributes) assert((doubleAlignMask == 0b0011) || (doubleAlignMask == 0b1100) || (doubleAlignMask == 0b0111) /* || 0b1111 is if'ed out */); // Now if doubleAlignMask is xyz1 i.e., the struct starts in r0, and we prespill r2 or r3 // but not both, then the stack would be misaligned for r0. So spill both // r2 and r3. // // ; +0 --- caller SP double aligned ---- // ; -4 r2 r3 // ; -8 r1 r1 // ; -c r0 r0 <-- misaligned. // ; callee saved regs bool startsAtR0 = (doubleAlignMask & 1) == 1; bool r2XorR3 = ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R2) == 0) != ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R3) == 0); if (startsAtR0 && r2XorR3) { codeGen->regSet.rsMaskPreSpillAlign = (~codeGen->regSet.rsMaskPreSpillRegArg & ~doubleAlignMask) & RBM_ARG_REGS; } } } #endif // TARGET_ARM } /*****************************************************************************/ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo) { //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compTypeCtxtArg = varDscInfo->varNum; LclVarDsc* varDsc = varDscInfo->varDsc; varDsc->lvIsParam = 1; varDsc->lvType = TYP_I_IMPL; if (varDscInfo->canEnreg(TYP_I_IMPL)) { /* Another register argument */ varDsc->lvIsRegArg = 1; varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->regArgNum(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame varDscInfo->intRegArgNum++; #ifdef DEBUG if (verbose) { printf("'GenCtxt' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif } else { // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg // returns false. 
varDsc->lvOnFrame = true; #if FEATURE_FASTTAILCALL varDsc->SetStackOffset(varDscInfo->stackArgSize); varDscInfo->stackArgSize += TARGET_POINTER_SIZE; #endif // FEATURE_FASTTAILCALL } compArgSize += TARGET_POINTER_SIZE; #if defined(TARGET_X86) if (info.compIsVarArgs) varDsc->SetStackOffset(compArgSize); #endif // TARGET_X86 varDscInfo->varNum++; varDscInfo->varDsc++; } } /*****************************************************************************/ void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo) { if (info.compIsVarArgs) { lvaVarargsHandleArg = varDscInfo->varNum; LclVarDsc* varDsc = varDscInfo->varDsc; varDsc->lvType = TYP_I_IMPL; varDsc->lvIsParam = 1; #if defined(TARGET_X86) // Codegen will need it for x86 scope info. varDsc->lvImplicitlyReferenced = 1; #endif // TARGET_X86 lvaSetVarDoNotEnregister(lvaVarargsHandleArg DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); assert(mostRecentlyActivePhase == PHASE_PRE_IMPORT); // TODO-Cleanup: this is preImportation phase, why do we try to work with regs here? // Should it be just deleted? if (varDscInfo->canEnreg(TYP_I_IMPL)) { /* Another register argument */ unsigned varArgHndArgNum = varDscInfo->allocRegArg(TYP_I_IMPL); varDsc->lvIsRegArg = 1; varDsc->SetArgReg(genMapRegArgNumToRegNum(varArgHndArgNum, TYP_I_IMPL)); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef TARGET_ARM // This has to be spilled right in front of the real arguments and we have // to pre-spill all the argument registers explicitly because we only // have symbols for the declared ones, not any potential variadic ones. for (unsigned ix = varArgHndArgNum; ix < ArrLen(intArgMasks); ix++) { codeGen->regSet.rsMaskPreSpillRegArg |= intArgMasks[ix]; } #endif // TARGET_ARM #ifdef DEBUG if (verbose) { printf("'VarArgHnd' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif // DEBUG } else { // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg // returns false. varDsc->lvOnFrame = true; #if FEATURE_FASTTAILCALL varDsc->SetStackOffset(varDscInfo->stackArgSize); varDscInfo->stackArgSize += TARGET_POINTER_SIZE; #endif // FEATURE_FASTTAILCALL } /* Update the total argument size, count and varDsc */ compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); // Allocate a temp to point at the beginning of the args lvaVarargsBaseOfStkArgs = lvaGrabTemp(false DEBUGARG("Varargs BaseOfStkArgs")); lvaTable[lvaVarargsBaseOfStkArgs].lvType = TYP_I_IMPL; #endif // TARGET_X86 } } /*****************************************************************************/ void Compiler::lvaInitVarDsc(LclVarDsc* varDsc, unsigned varNum, CorInfoType corInfoType, CORINFO_CLASS_HANDLE typeHnd, CORINFO_ARG_LIST_HANDLE varList, CORINFO_SIG_INFO* varSig) { noway_assert(varDsc == lvaGetDesc(varNum)); switch (corInfoType) { // Mark types that look like a pointer for doing shadow-copying of // parameters if we have an unsafe buffer. // Note that this does not handle structs with pointer fields. Instead, // we rely on using the assign-groups/equivalence-groups in // gsFindVulnerableParams() to determine if a buffer-struct contains a // pointer. We could do better by having the EE determine this for us. // Note that we want to keep buffers without pointers at lower memory // addresses than buffers with pointers.
case CORINFO_TYPE_PTR: case CORINFO_TYPE_BYREF: case CORINFO_TYPE_CLASS: case CORINFO_TYPE_STRING: case CORINFO_TYPE_VAR: case CORINFO_TYPE_REFANY: varDsc->lvIsPtr = 1; break; default: break; } var_types type = JITtype2varType(corInfoType); if (varTypeIsFloating(type)) { compFloatingPointUsed = true; } if (typeHnd) { unsigned cFlags = info.compCompHnd->getClassAttribs(typeHnd); // We can get typeHnds for primitive types, these are value types which only contain // a primitive. We will need the typeHnd to distinguish them, so we store it here. if ((cFlags & CORINFO_FLG_VALUECLASS) && !varTypeIsStruct(type)) { // printf("This is a struct that the JIT will treat as a primitive\n"); varDsc->lvVerTypeInfo = verMakeTypeInfo(typeHnd); } varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags); } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) varDsc->lvIsImplicitByRef = 0; #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) // Set the lvType (before this point it is TYP_UNDEF). if (GlobalJitOptions::compFeatureHfa) { varDsc->SetHfaType(TYP_UNDEF); } if ((varTypeIsStruct(type))) { lvaSetStruct(varNum, typeHnd, typeHnd != nullptr, true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(varNum); } } else { varDsc->lvType = type; } if (type == TYP_BOOL) { varDsc->lvIsBoolean = true; } #ifdef DEBUG varDsc->SetStackOffset(BAD_STK_OFFS); #endif #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS } /***************************************************************************** * Returns our internal varNum for a given IL variable. * Asserts assume it is called after lvaTable[] has been set up. */ unsigned Compiler::compMapILvarNum(unsigned ILvarNum) { noway_assert(ILvarNum < info.compILlocalsCount || ILvarNum > unsigned(ICorDebugInfo::UNKNOWN_ILNUM)); unsigned varNum; if (ILvarNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM) { // The varargs cookie is the last argument in lvaTable[] noway_assert(info.compIsVarArgs); varNum = lvaVarargsHandleArg; noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM) { noway_assert(info.compRetBuffArg != BAD_VAR_NUM); varNum = info.compRetBuffArg; } else if (ILvarNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM) { noway_assert(info.compTypeCtxtArg >= 0); varNum = unsigned(info.compTypeCtxtArg); } else if (ILvarNum < info.compILargsCount) { // Parameter varNum = compMapILargNum(ILvarNum); noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum < info.compILlocalsCount) { // Local variable unsigned lclNum = ILvarNum - info.compILargsCount; varNum = info.compArgsCount + lclNum; noway_assert(!lvaTable[varNum].lvIsParam); } else { unreached(); } noway_assert(varNum < info.compLocalsCount); return varNum; } /***************************************************************************** * Returns the IL variable number given our internal varNum. * Special return values are VARG_ILNUM, RETBUF_ILNUM, TYPECTXT_ILNUM. * * Returns UNKNOWN_ILNUM if it can't be mapped. */ unsigned Compiler::compMap2ILvarNum(unsigned varNum) const { if (compIsForInlining()) { return impInlineInfo->InlinerCompiler->compMap2ILvarNum(varNum); } noway_assert(varNum < lvaCount); if (varNum == info.compRetBuffArg) { return (unsigned)ICorDebugInfo::RETBUF_ILNUM; } // Is this a varargs function? if (info.compIsVarArgs && varNum == lvaVarargsHandleArg) { return (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM; } // We create an extra argument for the type context parameter // needed for shared generic code. 
if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum == (unsigned)info.compTypeCtxtArg) { return (unsigned)ICorDebugInfo::TYPECTXT_ILNUM; } #if FEATURE_FIXED_OUT_ARGS if (varNum == lvaOutgoingArgSpaceVar) { return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped } #endif // FEATURE_FIXED_OUT_ARGS // Now mutate varNum to remove extra parameters from the count. if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) && varNum > (unsigned)info.compTypeCtxtArg) { varNum--; } if (info.compIsVarArgs && varNum > lvaVarargsHandleArg) { varNum--; } /* Is there a hidden argument for the return buffer. Note that this code works because if the RetBuffArg is not present, compRetBuffArg will be BAD_VAR_NUM */ if (info.compRetBuffArg != BAD_VAR_NUM && varNum > info.compRetBuffArg) { varNum--; } if (varNum >= info.compLocalsCount) { return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped } return varNum; } /***************************************************************************** * Returns true if variable "varNum" may be address-exposed. */ bool Compiler::lvaVarAddrExposed(unsigned varNum) const { const LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->IsAddressExposed(); } /***************************************************************************** * Returns true iff variable "varNum" should not be enregistered (for one of several reasons). */ bool Compiler::lvaVarDoNotEnregister(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->lvDoNotEnregister; } //------------------------------------------------------------------------ // lvSetMinOptsDoNotEnreg: a helper to initialize the `lvDoNotEnregister` flag // for locals that were created before the compiler decided its optimization level. // // Assumptions: // compEnregLocals() value is finalized and is set to false. // void Compiler::lvSetMinOptsDoNotEnreg() { JITDUMP("compEnregLocals() is false, setting doNotEnreg flag for all locals."); assert(!compEnregLocals()); for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } } /***************************************************************************** * Returns the handle to the class of the local variable varNum */ CORINFO_CLASS_HANDLE Compiler::lvaGetStruct(unsigned varNum) { const LclVarDsc* varDsc = lvaGetDesc(varNum); return varDsc->GetStructHnd(); } //-------------------------------------------------------------------------------------------- // lvaFieldOffsetCmp - a static compare function passed to jitstd::sort() by Compiler::StructPromotionHelper; // compares fields' offsets. // // Arguments: // field1 - reference to the first field; // field2 - reference to the second field. // // Return value: // true if the first field's offset is less than the second field's offset, false otherwise. // bool Compiler::lvaFieldOffsetCmp::operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2) { return field1.fldOffset < field2.fldOffset; } //------------------------------------------------------------------------ // StructPromotionHelper constructor. // // Arguments: // compiler - pointer to a compiler to get access to an allocator, compHandle etc.
// Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler) : compiler(compiler) , structPromotionInfo() #ifdef DEBUG , retypedFieldsMap(compiler->getAllocator(CMK_DebugOnly)) #endif // DEBUG { } //-------------------------------------------------------------------------------------------- // TryPromoteStructVar - promote struct var if it is possible and profitable. // // Arguments: // lclNum - struct number to try. // // Return value: // true if the struct var was promoted. // bool Compiler::StructPromotionHelper::TryPromoteStructVar(unsigned lclNum) { if (CanPromoteStructVar(lclNum)) { #if 0 // Often-useful debugging code: if you've narrowed down a struct-promotion problem to a single // method, this allows you to select a subset of the vars to promote (by 1-based ordinal number). static int structPromoVarNum = 0; structPromoVarNum++; if (atoi(getenv("structpromovarnumlo")) <= structPromoVarNum && structPromoVarNum <= atoi(getenv("structpromovarnumhi"))) #endif // 0 if (ShouldPromoteStructVar(lclNum)) { PromoteStructVar(lclNum); return true; } } return false; } #ifdef DEBUG //-------------------------------------------------------------------------------------------- // CheckRetypedAsScalar - check that the fldType for this fieldHnd was retyped as the requested type. // // Arguments: // fieldHnd - the field handle; // requestedType - the type as which the field was accessed; // // Notes: // For example, it can happen when a struct such as A { struct B { long c } } is compiled and we access A.B.c; // it could look like "GT_FIELD struct B.c -> ADDR -> GT_FIELD struct A.B -> ADDR -> LCL_VAR A", but // "GT_FIELD struct A.B -> ADDR -> LCL_VAR A" can be promoted to "LCL_VAR long A.B" and then // there is a type mismatch between "GT_FIELD struct B.c" and "LCL_VAR long A.B". // void Compiler::StructPromotionHelper::CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType) { assert(retypedFieldsMap.Lookup(fieldHnd)); assert(retypedFieldsMap[fieldHnd] == requestedType); } #endif // DEBUG //-------------------------------------------------------------------------------------------- // CanPromoteStructType - checks if the struct type can be promoted. // // Arguments: // typeHnd - struct handle to check. // // Return value: // true if the struct type can be promoted. // // Notes: // The last analyzed type is memorized to skip the check if we ask about the same type again. // However, it was not found profitable to memorize all analyzed types in a map. // // The check initializes only the necessary fields in lvaStructPromotionInfo, // so if the promotion is rejected early, most fields will be uninitialized. // bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd) { assert(typeHnd != nullptr); if (!compiler->eeIsValueClass(typeHnd)) { // TODO-ObjectStackAllocation: Enable promotion of fields of stack-allocated objects. return false; } if (structPromotionInfo.typeHnd == typeHnd) { // Asking for the same type of struct as the last time. // Nothing needs to be done. // Fall through ... return structPromotionInfo.canPromote; } // Analyze this type from scratch. structPromotionInfo = lvaStructPromotionInfo(typeHnd); // sizeof(double) represents the size of the largest primitive type that we can struct promote. // In the future this may change to XMM_REGSIZE_BYTES. // Note: MaxOffset is used below to declare a local array, and therefore must be a compile-time constant.
CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_SIMD) #if defined(TARGET_XARCH) // This will allow promotion of 4 Vector<T> fields on AVX2 or Vector256<T> on AVX, // or 8 Vector<T>/Vector128<T> fields on SSE2. const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * YMM_REGSIZE_BYTES; #elif defined(TARGET_ARM64) const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * FP_REGSIZE_BYTES; #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) #else // !FEATURE_SIMD const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * sizeof(double); #endif // !FEATURE_SIMD assert((BYTE)MaxOffset == MaxOffset); // because lvaStructFieldInfo.fldOffset is byte-sized assert((BYTE)MAX_NumOfFieldsInPromotableStruct == MAX_NumOfFieldsInPromotableStruct); // because lvaStructFieldInfo.fieldCnt is byte-sized bool containsGCpointers = false; COMP_HANDLE compHandle = compiler->info.compCompHnd; unsigned structSize = compHandle->getClassSize(typeHnd); if (structSize > MaxOffset) { return false; // struct is too large } unsigned fieldCnt = compHandle->getClassNumInstanceFields(typeHnd); if (fieldCnt == 0 || fieldCnt > MAX_NumOfFieldsInPromotableStruct) { return false; // struct must have between 1 and MAX_NumOfFieldsInPromotableStruct fields } structPromotionInfo.fieldCnt = (unsigned char)fieldCnt; DWORD typeFlags = compHandle->getClassAttribs(typeHnd); bool overlappingFields = StructHasOverlappingFields(typeFlags); if (overlappingFields) { return false; } // Don't struct promote if we have a CUSTOMLAYOUT flag on an HFA type if (StructHasCustomLayout(typeFlags) && compiler->IsHfa(typeHnd)) { return false; } #ifdef TARGET_ARM // On ARM, we have a requirement on the struct alignment; see below. unsigned structAlignment = roundUp(compHandle->getClassAlignmentRequirement(typeHnd), TARGET_POINTER_SIZE); #endif // TARGET_ARM // If we have "Custom Layout" then we might have an explicit Size attribute // Managed C++ uses this for its structs; such C++ types will not contain GC pointers. // // The current VM implementation also incorrectly sets the CORINFO_FLG_CUSTOMLAYOUT // whenever a managed value class contains any GC pointers. // (See the comment for VMFLAG_NOT_TIGHTLY_PACKED in class.h) // // It is important to struct promote managed value classes that have GC pointers, // so we compute the correct value for "CustomLayout" here // if (StructHasCustomLayout(typeFlags) && ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0)) { structPromotionInfo.customLayout = true; } if (StructHasDontDigFieldsFlagSet(typeFlags)) { return CanConstructAndPromoteField(&structPromotionInfo); } unsigned fieldsSize = 0; for (BYTE ordinal = 0; ordinal < fieldCnt; ++ordinal) { CORINFO_FIELD_HANDLE fieldHnd = compHandle->getFieldInClass(typeHnd, ordinal); structPromotionInfo.fields[ordinal] = GetFieldInfo(fieldHnd, ordinal); const lvaStructFieldInfo& fieldInfo = structPromotionInfo.fields[ordinal]; noway_assert(fieldInfo.fldOffset < structSize); if (fieldInfo.fldSize == 0) { // Not a scalar type. return false; } if ((fieldInfo.fldOffset % fieldInfo.fldSize) != 0) { // The code in Compiler::genPushArgList that reconstitutes // struct values on the stack from promoted fields expects // those fields to be at their natural alignment. return false; } if (varTypeIsGC(fieldInfo.fldType)) { containsGCpointers = true; } // The end offset for this field should never be larger than our structSize.
noway_assert(fieldInfo.fldOffset + fieldInfo.fldSize <= structSize); fieldsSize += fieldInfo.fldSize; #ifdef TARGET_ARM // On ARM, for struct types that don't use explicit layout, the alignment of the struct is // at least the max alignment of its fields. We take advantage of this invariant in struct promotion, // so verify it here. if (fieldInfo.fldSize > structAlignment) { // Don't promote vars whose struct types violate the invariant. (Alignment == size for primitives.) return false; } #endif // TARGET_ARM } // If we saw any GC pointer or by-ref fields above then CORINFO_FLG_CONTAINS_GC_PTR or // CORINFO_FLG_BYREF_LIKE has to be set! noway_assert((containsGCpointers == false) || ((typeFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) != 0)); // Check if this promoted struct contains any holes. assert(!overlappingFields); if (fieldsSize != structSize) { // If sizes do not match it means we have overlapping fields or holes. // Overlapping fields were rejected early, so here it can mean only holes. structPromotionInfo.containsHoles = true; } // Cool, this struct is promotable. structPromotionInfo.canPromote = true; return true; } //-------------------------------------------------------------------------------------------- // CanConstructAndPromoteField - checks if we can construct field types without asking about them directly. // // Arguments: // structPromotionInfo - struct promotion candidate information. // // Return value: // true if we can figure out the fields from available knowledge. // // Notes: // This is needed for AOT R2R compilation when we can't cross compilation bubble borders, // so we should not ask about fields that are not directly referenced. If we do, the VM will have // to emit a type check for this field type, but it does not have enough information about it. // As a workaround for a performance-critical corner case (a struct with 1 gcref), we try to construct // the field information from indirect observations. // bool Compiler::StructPromotionHelper::CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo) { const CORINFO_CLASS_HANDLE typeHnd = structPromotionInfo->typeHnd; const COMP_HANDLE compHandle = compiler->info.compCompHnd; const DWORD typeFlags = compHandle->getClassAttribs(typeHnd); if (structPromotionInfo->fieldCnt != 1) { // Can't find out values for several fields. return false; } if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0) { // Can't find out type of a non-gc field. return false; } const unsigned structSize = compHandle->getClassSize(typeHnd); if (structSize != TARGET_POINTER_SIZE) { return false; } assert(!structPromotionInfo->containsHoles); assert(!structPromotionInfo->customLayout); lvaStructFieldInfo& fldInfo = structPromotionInfo->fields[0]; fldInfo.fldHnd = compHandle->getFieldInClass(typeHnd, 0); // We should not read it anymore. fldInfo.fldTypeHnd = 0; fldInfo.fldOffset = 0; fldInfo.fldOrdinal = 0; fldInfo.fldSize = TARGET_POINTER_SIZE; fldInfo.fldType = TYP_BYREF; structPromotionInfo->canPromote = true; return true; } //-------------------------------------------------------------------------------------------- // CanPromoteStructVar - checks if the struct can be promoted. // // Arguments: // lclNum - struct number to check. // // Return value: // true if the struct var can be promoted.
// bool Compiler::StructPromotionHelper::CanPromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varTypeIsStruct(varDsc)); assert(!varDsc->lvPromoted); // Don't ask again :) // If this lclVar is used in a SIMD intrinsic, then we don't want to struct promote it. // Note, however, that SIMD lclVars that are NOT used in a SIMD intrinsic may be // profitably promoted. if (varDsc->lvIsUsedInSIMDIntrinsic()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsUsedInSIMDIntrinsic()\n", lclNum); return false; } // Reject struct promotion of parameters when -GS stack reordering is enabled // as we could introduce shadow copies of them. if (varDsc->lvIsParam && compiler->compGSReorderStackLayout) { JITDUMP(" struct promotion of V%02u is disabled because lvIsParam and compGSReorderStackLayout\n", lclNum); return false; } if (!compiler->lvaEnregMultiRegVars && varDsc->lvIsMultiRegArgOrRet()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsMultiRegArgOrRet()\n", lclNum); return false; } CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != NO_CLASS_HANDLE); bool canPromote = CanPromoteStructType(typeHnd); if (canPromote && varDsc->lvIsMultiRegArgOrRet()) { unsigned fieldCnt = structPromotionInfo.fieldCnt; if (fieldCnt > MAX_MULTIREG_COUNT) { canPromote = false; } #if defined(TARGET_ARMARCH) else { for (unsigned i = 0; canPromote && (i < fieldCnt); i++) { var_types fieldType = structPromotionInfo.fields[i].fldType; // Non-HFA structs are always passed in general purpose registers. // If there are any floating point fields, don't promote for now. // Likewise, since HVA structs are passed in SIMD registers // promotion of non FP or SIMD type fields is disallowed. // TODO-1stClassStructs: add support in Lowering and prolog generation // to enable promoting these types. if (varDsc->lvIsParam && (varDsc->lvIsHfa() != varTypeUsesFloatReg(fieldType))) { canPromote = false; } #if defined(FEATURE_SIMD) // If we have a register-passed struct with mixed non-opaque SIMD types (i.e. with defined fields) // and non-SIMD types, we don't currently handle that case in the prolog, so we can't promote. else if ((fieldCnt > 1) && varTypeIsStruct(fieldType) && !compiler->isOpaqueSIMDType(structPromotionInfo.fields[i].fldTypeHnd)) { canPromote = false; } #endif // FEATURE_SIMD } } #elif defined(UNIX_AMD64_ABI) else { SortStructFields(); // Only promote if the field types match the registers, unless we have a single SIMD field. SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); unsigned regCount = structDesc.eightByteCount; if ((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType)) { // Allow the case of promoting a single SIMD field, even if there are multiple registers. // We will fix this up in the prolog. } else if (structPromotionInfo.fieldCnt != regCount) { canPromote = false; } else { for (unsigned i = 0; canPromote && (i < regCount); i++) { lvaStructFieldInfo* fieldInfo = &(structPromotionInfo.fields[i]); var_types fieldType = fieldInfo->fldType; // We don't currently support passing SIMD types in registers. 
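                    // Illustrative note (assumed SysV classification, not from the original
                    // source): a struct such as { double d; long l; } is classified as one
                    // SSE eightbyte plus one INTEGER eightbyte, so promotion is rejected
                    // below unless each field's register file matches its eightbyte class.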
if (varTypeIsSIMD(fieldType)) { canPromote = false; } else if (varTypeUsesFloatReg(fieldType) != (structDesc.eightByteClassifications[i] == SystemVClassificationTypeSSE)) { canPromote = false; } } } } #endif // UNIX_AMD64_ABI } return canPromote; } //-------------------------------------------------------------------------------------------- // ShouldPromoteStructVar - Should a struct var be promoted if it can be promoted? // This routine mainly performs profitability checks. Right now it also has // some correctness checks due to limitations of down-stream phases. // // Arguments: // lclNum - struct local number; // // Return value: // true if the struct should be promoted. // bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varTypeIsStruct(varDsc)); assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd); assert(structPromotionInfo.canPromote); bool shouldPromote = true; // We *can* promote; *should* we promote? // We should only do so if promotion has potential savings. One source of savings // is if a field of the struct is accessed, since this access will be turned into // an access of the corresponding promoted field variable. Even if there are no // field accesses, but only block-level operations on the whole struct, if the struct // has only one or two fields, then doing those block operations field-wise is probably faster // than doing a whole-variable block operation (e.g., a hardware "copy loop" on x86). // Struct promotion also provides the following benefits: reduce stack frame size, // reduce the need for zero init of stack frame and fine grained constant/copy prop. // Asm diffs indicate that promoting structs up to 3 fields is a net size win. // So if no fields are accessed independently, and there are four or more fields, // then do not promote. // // TODO: Ideally we would want to consider the impact of whether the struct is // passed as a parameter or assigned the return value of a call. Because once promoted, // struct copying is done by field by field assignment instead of a more efficient // rep.stos or xmm reg based copy. if (structPromotionInfo.fieldCnt > 3 && !varDsc->lvFieldAccessed) { JITDUMP("Not promoting promotable struct local V%02u: #fields = %d, fieldAccessed = %d.\n", lclNum, structPromotionInfo.fieldCnt, varDsc->lvFieldAccessed); shouldPromote = false; } else if (varDsc->lvIsMultiRegRet && structPromotionInfo.containsHoles && structPromotionInfo.customLayout) { JITDUMP("Not promoting multi-reg returned struct local V%02u with holes.\n", lclNum); shouldPromote = false; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // TODO-PERF - Only do this when the LclVar is used in an argument context // TODO-ARM64 - HFA support should also eliminate the need for this. // TODO-ARM32 - HFA support should also eliminate the need for this. 
    // TODO-LSRA - Currently doesn't support the passing of floating point LCL_VARS in the integer registers
    //
    // For now we don't promote structs with a single float field.
    // Promoting it can cause us to shuffle it back and forth between the int and
    // the float regs when it is used as an argument, which is very expensive for XARCH.
    //
    else if ((structPromotionInfo.fieldCnt == 1) && varTypeIsFloating(structPromotionInfo.fields[0].fldType))
    {
        JITDUMP("Not promoting promotable struct local V%02u: #fields = %d because it is a struct with "
                "single float field.\n",
                lclNum, structPromotionInfo.fieldCnt);
        shouldPromote = false;
    }
#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM
    else if (varDsc->lvIsParam && !compiler->lvaIsImplicitByRefLocal(lclNum) && !varDsc->lvIsHfa())
    {
#if FEATURE_MULTIREG_STRUCT_PROMOTE
        // Is this a variable holding a value with exactly two fields passed in
        // multiple registers?
        if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
        {
            if (structPromotionInfo.containsHoles && structPromotionInfo.customLayout)
            {
                JITDUMP("Not promoting multi-reg struct local V%02u with holes.\n", lclNum);
                shouldPromote = false;
            }
            else if ((structPromotionInfo.fieldCnt != 2) &&
                     !((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType)))
            {
                JITDUMP("Not promoting multireg struct local V%02u, because lvIsParam is true, #fields != 2 and it's "
                        "not a single SIMD.\n",
                        lclNum);
                shouldPromote = false;
            }
        }
        else
#endif // FEATURE_MULTIREG_STRUCT_PROMOTE

            // TODO-PERF - Implement struct promotion for incoming single-register structs.
            //             Also the implementation of jmp uses the 4 byte move to store
            //             byte parameters to the stack, so that if we have a byte field
            //             with something else occupying the same 4-byte slot, it will
            //             overwrite other fields.
            if (structPromotionInfo.fieldCnt != 1)
        {
            JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = "
                    "%d.\n",
                    lclNum, structPromotionInfo.fieldCnt);
            shouldPromote = false;
        }
    }
    else if ((lclNum == compiler->genReturnLocal) && (structPromotionInfo.fieldCnt > 1))
    {
        // TODO-1stClassStructs: a temporary solution to keep diffs small, it will be fixed later.
        shouldPromote = false;
    }
#if defined(DEBUG)
    else if (compiler->compPromoteFewerStructs(lclNum))
    {
        // Do not promote some structs that can be promoted, to stress promoted/unpromoted moves.
        JITDUMP("Not promoting promotable struct local V%02u, because of STRESS_PROMOTE_FEWER_STRUCTS\n", lclNum);
        shouldPromote = false;
    }
#endif

    //
    // If the lvRefCnt is zero and we have a struct promoted parameter we can end up with an extra store of
    // the incoming register into the stack frame slot.
    // In that case, we would like to avoid promotion.
    // However we haven't yet computed the lvRefCnt values so we can't do that.
    //
    CLANG_FORMAT_COMMENT_ANCHOR;

    return shouldPromote;
}

//--------------------------------------------------------------------------------------------
// SortStructFields - sort the fields according to the increasing order of the field offset.
//
// Notes:
//   This is needed because the fields need to be pushed on stack (when referenced as a struct) in offset order.
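//   Illustrative example (hypothetical offsets, not from the original source):
//   fields recorded at offsets {8, 0, 4} are reordered to {0, 4, 8}; the sort key
//   is fldOffset alone, applied via the lvaFieldOffsetCmp comparer.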
// void Compiler::StructPromotionHelper::SortStructFields() { if (!structPromotionInfo.fieldsSorted) { jitstd::sort(structPromotionInfo.fields, structPromotionInfo.fields + structPromotionInfo.fieldCnt, lvaFieldOffsetCmp()); structPromotionInfo.fieldsSorted = true; } } //-------------------------------------------------------------------------------------------- // GetFieldInfo - get struct field information. // Arguments: // fieldHnd - field handle to get info for; // ordinal - field ordinal. // // Return value: // field information. // Compiler::lvaStructFieldInfo Compiler::StructPromotionHelper::GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal) { lvaStructFieldInfo fieldInfo; fieldInfo.fldHnd = fieldHnd; unsigned fldOffset = compiler->info.compCompHnd->getFieldOffset(fieldInfo.fldHnd); fieldInfo.fldOffset = (BYTE)fldOffset; fieldInfo.fldOrdinal = ordinal; CorInfoType corType = compiler->info.compCompHnd->getFieldType(fieldInfo.fldHnd, &fieldInfo.fldTypeHnd); fieldInfo.fldType = JITtype2varType(corType); fieldInfo.fldSize = genTypeSize(fieldInfo.fldType); #ifdef FEATURE_SIMD // Check to see if this is a SIMD type. // We will only check this if we have already found a SIMD type, which will be true if // we have encountered any SIMD intrinsics. if (compiler->usesSIMDTypes() && (fieldInfo.fldSize == 0) && compiler->isSIMDorHWSIMDClass(fieldInfo.fldTypeHnd)) { unsigned simdSize; CorInfoType simdBaseJitType = compiler->getBaseJitTypeAndSizeOfSIMDType(fieldInfo.fldTypeHnd, &simdSize); // We will only promote fields of SIMD types that fit into a SIMD register. if (simdBaseJitType != CORINFO_TYPE_UNDEF) { if ((simdSize >= compiler->minSIMDStructBytes()) && (simdSize <= compiler->maxSIMDStructBytes())) { fieldInfo.fldType = compiler->getSIMDTypeForSize(simdSize); fieldInfo.fldSize = simdSize; #ifdef DEBUG retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite); #endif // DEBUG } } } #endif // FEATURE_SIMD if (fieldInfo.fldSize == 0) { TryPromoteStructField(fieldInfo); } return fieldInfo; } //-------------------------------------------------------------------------------------------- // TryPromoteStructField - checks that this struct's field is a struct that can be promoted as scalar type // aligned at its natural boundary. Promotes the field as a scalar if the check succeeded. // // Arguments: // fieldInfo - information about the field in the outer struct. // // Return value: // true if the internal struct was promoted. // bool Compiler::StructPromotionHelper::TryPromoteStructField(lvaStructFieldInfo& fieldInfo) { // Size of TYP_BLK, TYP_FUNC, TYP_VOID and TYP_STRUCT is zero. // Early out if field type is other than TYP_STRUCT. // This is a defensive check as we don't expect a struct to have // fields of TYP_BLK, TYP_FUNC or TYP_VOID. if (fieldInfo.fldType != TYP_STRUCT) { return false; } COMP_HANDLE compHandle = compiler->info.compCompHnd; // Do not promote if the struct field in turn has more than one field. if (compHandle->getClassNumInstanceFields(fieldInfo.fldTypeHnd) != 1) { return false; } // Do not promote if the single field is not aligned at its natural boundary within // the struct field. 
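    // Illustrative example (hypothetical wrapper type, not from the original source):
    //     struct Id { int value; }
    // has a single field at offset 0 whose size equals the wrapper's size, so the code
    // below can retype the outer field as a scalar int; the checks below reject
    // floating-point fields and wrappers whose single field does not fill them.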
CORINFO_FIELD_HANDLE innerFieldHndl = compHandle->getFieldInClass(fieldInfo.fldTypeHnd, 0); unsigned innerFieldOffset = compHandle->getFieldOffset(innerFieldHndl); if (innerFieldOffset != 0) { return false; } CorInfoType fieldCorType = compHandle->getFieldType(innerFieldHndl); var_types fieldVarType = JITtype2varType(fieldCorType); unsigned fieldSize = genTypeSize(fieldVarType); // Do not promote if the field is not a primitive type, is floating-point, // or is not properly aligned. // // TODO-PERF: Structs containing a single floating-point field on Amd64 // need to be passed in integer registers. Right now LSRA doesn't support // passing of floating-point LCL_VARS in integer registers. Enabling promotion // of such structs results in an assert in lsra right now. // // TODO-CQ: Right now we only promote an actual SIMD typed field, which would cause // a nested SIMD type to fail promotion. if (fieldSize == 0 || fieldSize > TARGET_POINTER_SIZE || varTypeIsFloating(fieldVarType)) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but that field has invalid size or type.\n"); return false; } if (fieldSize != TARGET_POINTER_SIZE) { unsigned outerFieldOffset = compHandle->getFieldOffset(fieldInfo.fldHnd); if ((outerFieldOffset % fieldSize) != 0) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but the outer struct offset %u is not a multiple of the inner field size %u.\n", outerFieldOffset, fieldSize); return false; } } // Insist this wrapped field occupy all of its parent storage. unsigned innerStructSize = compHandle->getClassSize(fieldInfo.fldTypeHnd); if (fieldSize != innerStructSize) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but that field is not the same size as its parent.\n"); return false; } // Retype the field as the type of the single field of the struct. // This is a hack that allows us to promote such fields before we support recursive struct promotion // (tracked by #10019). fieldInfo.fldType = fieldVarType; fieldInfo.fldSize = fieldSize; #ifdef DEBUG retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite); #endif // DEBUG return true; } //-------------------------------------------------------------------------------------------- // PromoteStructVar - promote struct variable. // // Arguments: // lclNum - struct local number; // void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); // We should never see a reg-sized non-field-addressed struct here. assert(!varDsc->lvRegStruct); assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd); assert(structPromotionInfo.canPromote); varDsc->lvFieldCnt = structPromotionInfo.fieldCnt; varDsc->lvFieldLclStart = compiler->lvaCount; varDsc->lvPromoted = true; varDsc->lvContainsHoles = structPromotionInfo.containsHoles; varDsc->lvCustomLayout = structPromotionInfo.customLayout; #ifdef DEBUG // Don't change the source to a TYP_BLK either. 
varDsc->lvKeepType = 1; #endif #ifdef DEBUG if (compiler->verbose) { printf("\nPromoting struct local V%02u (%s):", lclNum, compiler->eeGetClassName(varDsc->GetStructHnd())); } #endif SortStructFields(); for (unsigned index = 0; index < structPromotionInfo.fieldCnt; ++index) { const lvaStructFieldInfo* pFieldInfo = &structPromotionInfo.fields[index]; if (varTypeUsesFloatReg(pFieldInfo->fldType)) { // Whenever we promote a struct that contains a floating point field // it's possible we transition from a method that originally only had integer // local vars to start having FP. We have to communicate this through this flag // since LSRA later on will use this flag to determine whether or not to track FP register sets. compiler->compFloatingPointUsed = true; } // Now grab the temp for the field local. #ifdef DEBUG char buf[200]; sprintf_s(buf, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum, compiler->eeGetFieldName(pFieldInfo->fldHnd), pFieldInfo->fldOffset); // We need to copy 'buf' as lvaGrabTemp() below caches a copy to its argument. size_t len = strlen(buf) + 1; char* bufp = compiler->getAllocator(CMK_DebugOnly).allocate<char>(len); strcpy_s(bufp, len, buf); if (index > 0) { noway_assert(pFieldInfo->fldOffset > (pFieldInfo - 1)->fldOffset); } #endif // Lifetime of field locals might span multiple BBs, so they must be long lifetime temps. const unsigned varNum = compiler->lvaGrabTemp(false DEBUGARG(bufp)); // lvaGrabTemp can reallocate the lvaTable, so // refresh the cached varDsc for lclNum. varDsc = compiler->lvaGetDesc(lclNum); LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varNum); fieldVarDsc->lvType = pFieldInfo->fldType; fieldVarDsc->lvExactSize = pFieldInfo->fldSize; fieldVarDsc->lvIsStructField = true; fieldVarDsc->lvFieldHnd = pFieldInfo->fldHnd; fieldVarDsc->lvFldOffset = pFieldInfo->fldOffset; fieldVarDsc->lvFldOrdinal = pFieldInfo->fldOrdinal; fieldVarDsc->lvParentLcl = lclNum; fieldVarDsc->lvIsParam = varDsc->lvIsParam; // This new local may be the first time we've seen a long typed local. if (fieldVarDsc->lvType == TYP_LONG) { compiler->compLongUsed = true; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Reset the implicitByRef flag. fieldVarDsc->lvIsImplicitByRef = 0; #endif // Do we have a parameter that can be enregistered? // if (varDsc->lvIsRegArg) { fieldVarDsc->lvIsRegArg = true; regNumber parentArgReg = varDsc->GetArgReg(); #if FEATURE_MULTIREG_ARGS if (!compiler->lvaIsImplicitByRefLocal(lclNum)) { #ifdef UNIX_AMD64_ABI if (varTypeIsSIMD(fieldVarDsc) && (varDsc->lvFieldCnt == 1)) { // This SIMD typed field may be passed in multiple registers. fieldVarDsc->SetArgReg(parentArgReg); fieldVarDsc->SetOtherArgReg(varDsc->GetOtherArgReg()); } else #endif // UNIX_AMD64_ABI { regNumber fieldRegNum; if (index == 0) { fieldRegNum = parentArgReg; } else if (varDsc->lvIsHfa()) { unsigned regIncrement = fieldVarDsc->lvFldOrdinal; #ifdef TARGET_ARM // TODO: Need to determine if/how to handle split args. if (varDsc->GetHfaType() == TYP_DOUBLE) { regIncrement *= 2; } #endif // TARGET_ARM fieldRegNum = (regNumber)(parentArgReg + regIncrement); } else { assert(index == 1); fieldRegNum = varDsc->GetOtherArgReg(); } fieldVarDsc->SetArgReg(fieldRegNum); } } else #endif // FEATURE_MULTIREG_ARGS && defined(FEATURE_SIMD) { fieldVarDsc->SetArgReg(parentArgReg); } } #ifdef FEATURE_SIMD if (varTypeIsSIMD(pFieldInfo->fldType)) { // Set size to zero so that lvaSetStruct will appropriately set the SIMD-relevant fields. 
fieldVarDsc->lvExactSize = 0; compiler->lvaSetStruct(varNum, pFieldInfo->fldTypeHnd, false, true); // We will not recursively promote this, so mark it as 'lvRegStruct' (note that we wouldn't // be promoting this if we didn't think it could be enregistered. fieldVarDsc->lvRegStruct = true; } #endif // FEATURE_SIMD #ifdef DEBUG // This temporary should not be converted to a double in stress mode, // because we introduce assigns to it after the stress conversion fieldVarDsc->lvKeepType = 1; #endif } } //-------------------------------------------------------------------------------------------- // lvaGetFieldLocal - returns the local var index for a promoted field in a promoted struct var. // // Arguments: // varDsc - the promoted struct var descriptor; // fldOffset - field offset in the struct. // // Return value: // the index of the local that represents this field. // unsigned Compiler::lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset) { noway_assert(varTypeIsStruct(varDsc)); noway_assert(varDsc->lvPromoted); for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { noway_assert(lvaTable[i].lvIsStructField); noway_assert(lvaTable[i].lvParentLcl == (unsigned)(varDsc - lvaTable)); if (lvaTable[i].lvFldOffset == fldOffset) { return i; } } // This is the not-found error return path, the caller should check for BAD_VAR_NUM return BAD_VAR_NUM; } /***************************************************************************** * * Set the local var "varNum" as address-exposed. * If this is a promoted struct, label it's fields the same way. */ void Compiler::lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason)) { LclVarDsc* varDsc = lvaGetDesc(varNum); varDsc->SetAddressExposed(true DEBUGARG(reason)); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { noway_assert(lvaTable[i].lvIsStructField); lvaTable[i].SetAddressExposed(true DEBUGARG(AddressExposedReason::PARENT_EXPOSED)); lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::AddrExposed)); } } lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::AddrExposed)); } //------------------------------------------------------------------------ // lvaSetVarLiveInOutOfHandler: Set the local varNum as being live in and/or out of a handler // // Arguments: // varNum - the varNum of the local // void Compiler::lvaSetVarLiveInOutOfHandler(unsigned varNum) { LclVarDsc* varDsc = lvaGetDesc(varNum); varDsc->lvLiveInOutOfHndlr = 1; if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { noway_assert(lvaTable[i].lvIsStructField); lvaTable[i].lvLiveInOutOfHndlr = 1; // For now, only enregister an EH Var if it is a single def and whose refCnt > 1. if (!lvaEnregEHVars || !lvaTable[i].lvSingleDefRegCandidate || lvaTable[i].lvRefCnt() <= 1) { lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } } } // For now, only enregister an EH Var if it is a single def and whose refCnt > 1. 
if (!lvaEnregEHVars || !varDsc->lvSingleDefRegCandidate || varDsc->lvRefCnt() <= 1) { lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } #ifdef JIT32_GCENCODER else if (lvaKeepAliveAndReportThis() && (varNum == info.compThisArg)) { // For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer // in the same register for the entire method, or keep it on the stack. If it is EH-exposed, we can't ever // keep it in a register, since it must also be live on the stack. Therefore, we won't attempt to allocate it. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } #endif // JIT32_GCENCODER } /***************************************************************************** * * Record that the local var "varNum" should not be enregistered (for one of several reasons.) */ void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)) { LclVarDsc* varDsc = lvaGetDesc(varNum); const bool wasAlreadyMarkedDoNotEnreg = (varDsc->lvDoNotEnregister == 1); varDsc->lvDoNotEnregister = 1; #ifdef DEBUG if (!wasAlreadyMarkedDoNotEnreg) { varDsc->SetDoNotEnregReason(reason); } if (verbose) { printf("\nLocal V%02u should not be enregistered because: ", varNum); } switch (reason) { case DoNotEnregisterReason::AddrExposed: JITDUMP("it is address exposed\n"); assert(varDsc->IsAddressExposed()); break; case DoNotEnregisterReason::DontEnregStructs: JITDUMP("struct enregistration is disabled\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::NotRegSizeStruct: JITDUMP("struct size does not match reg size\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::LocalField: JITDUMP("was accessed as a local field\n"); break; case DoNotEnregisterReason::VMNeedsStackAddr: JITDUMP("VM needs stack addr\n"); break; case DoNotEnregisterReason::LiveInOutOfHandler: JITDUMP("live in/out of a handler\n"); varDsc->lvLiveInOutOfHndlr = 1; break; case DoNotEnregisterReason::BlockOp: JITDUMP("written/read in a block op\n"); break; case DoNotEnregisterReason::IsStructArg: if (varTypeIsStruct(varDsc)) { JITDUMP("it is a struct arg\n"); } else { JITDUMP("it is reinterpreted as a struct arg\n"); } break; case DoNotEnregisterReason::DepField: JITDUMP("field of a dependently promoted struct\n"); assert(varDsc->lvIsStructField && (lvaGetParentPromotionType(varNum) != PROMOTION_TYPE_INDEPENDENT)); break; case DoNotEnregisterReason::NoRegVars: JITDUMP("opts.compFlags & CLFLG_REGVAR is not set\n"); assert(!compEnregLocals()); break; case DoNotEnregisterReason::MinOptsGC: JITDUMP("it is a GC Ref and we are compiling MinOpts\n"); assert(!JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())); break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: JITDUMP("it is a decomposed field of a long parameter\n"); break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: JITDUMP("pinning ref\n"); assert(varDsc->lvPinned); break; #endif case DoNotEnregisterReason::LclAddrNode: JITDUMP("LclAddrVar/Fld takes the address of this node\n"); break; case DoNotEnregisterReason::CastTakesAddr: JITDUMP("cast takes addr\n"); break; case DoNotEnregisterReason::StoreBlkSrc: JITDUMP("the local is used as store block src\n"); break; case DoNotEnregisterReason::OneAsgRetyping: JITDUMP("OneAsg forbids enreg\n"); break; case DoNotEnregisterReason::SwizzleArg: JITDUMP("SwizzleArg\n"); break; case DoNotEnregisterReason::BlockOpRet: 
JITDUMP("return uses a block op\n"); break; case DoNotEnregisterReason::ReturnSpCheck: JITDUMP("Used for SP check\n"); break; case DoNotEnregisterReason::SimdUserForcesDep: JITDUMP("Promoted struct used by a SIMD/HWI node\n"); break; default: unreached(); break; } #endif } // Returns true if this local var is a multireg struct. // TODO-Throughput: This does a lookup on the class handle, and in the outgoing arg context // this information is already available on the fgArgTabEntry, and shouldn't need to be // recomputed. // bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVarArg) { if (varTypeIsStruct(varDsc->TypeGet())) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); structPassingKind howToPassStruct; var_types type = getArgTypeForStruct(clsHnd, &howToPassStruct, isVarArg, varDsc->lvExactSize); if (howToPassStruct == SPK_ByValueAsHfa) { assert(type == TYP_STRUCT); return true; } #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) if (howToPassStruct == SPK_ByValue) { assert(type == TYP_STRUCT); return true; } #endif } return false; } /***************************************************************************** * Set the lvClass for a local variable of a struct type */ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo) { LclVarDsc* varDsc = lvaGetDesc(varNum); if (setTypeInfo) { varDsc->lvVerTypeInfo = typeInfo(TI_STRUCT, typeHnd); } // Set the type and associated info if we haven't already set it. if (varDsc->lvType == TYP_UNDEF) { varDsc->lvType = TYP_STRUCT; } if (varDsc->GetLayout() == nullptr) { ClassLayout* layout = typGetObjLayout(typeHnd); varDsc->SetLayout(layout); assert(varDsc->lvExactSize == 0); varDsc->lvExactSize = layout->GetSize(); assert(varDsc->lvExactSize != 0); if (layout->IsValueClass()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; varDsc->lvType = impNormStructType(typeHnd, &simdBaseJitType); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Mark implicit byref struct parameters if (varDsc->lvIsParam && !varDsc->lvIsStructField) { structPassingKind howToReturnStruct; getArgTypeForStruct(typeHnd, &howToReturnStruct, this->info.compIsVarArgs, varDsc->lvExactSize); if (howToReturnStruct == SPK_ByReference) { JITDUMP("Marking V%02i as a byref parameter\n", varNum); varDsc->lvIsImplicitByRef = 1; } } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #if FEATURE_SIMD if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(varDsc)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); } #endif // FEATURE_SIMD if (GlobalJitOptions::compFeatureHfa) { // For structs that are small enough, we check and set HFA element type if (varDsc->lvExactSize <= MAX_PASS_MULTIREG_BYTES) { // hfaType is set to float, double or SIMD type if it is an HFA, otherwise TYP_UNDEF var_types hfaType = GetHfaType(typeHnd); if (varTypeIsValidHfaType(hfaType)) { varDsc->SetHfaType(hfaType); // hfa variables can never contain GC pointers assert(!layout->HasGCPtr()); // The size of this struct should be evenly divisible by 4 or 8 assert((varDsc->lvExactSize % genTypeSize(hfaType)) == 0); // The number of elements in the HFA should fit into our MAX_ARG_REG_COUNT limit assert((varDsc->lvExactSize / genTypeSize(hfaType)) <= MAX_ARG_REG_COUNT); } } } } } else { #if FEATURE_SIMD assert(!varTypeIsSIMD(varDsc) || (varDsc->GetSimdBaseType() != TYP_UNKNOWN)); #endif // FEATURE_SIMD ClassLayout* layout = typGetObjLayout(typeHnd); assert(ClassLayout::AreCompatible(varDsc->GetLayout(), 
layout)); // Inlining could replace a canon struct type with an exact one. varDsc->SetLayout(layout); assert(varDsc->lvExactSize != 0); } #ifndef TARGET_64BIT bool fDoubleAlignHint = false; #ifdef TARGET_X86 fDoubleAlignHint = true; #endif if (info.compCompHnd->getClassAlignmentRequirement(typeHnd, fDoubleAlignHint) == 8) { #ifdef DEBUG if (verbose) { printf("Marking struct in V%02i with double align flag\n", varNum); } #endif varDsc->lvStructDoubleAlign = 1; } #endif // not TARGET_64BIT unsigned classAttribs = info.compCompHnd->getClassAttribs(typeHnd); varDsc->lvOverlappingFields = StructHasOverlappingFields(classAttribs); // Check whether this local is an unsafe value type and requires GS cookie protection. // GS checks require the stack to be re-ordered, which can't be done with EnC. if (unsafeValueClsCheck && (classAttribs & CORINFO_FLG_UNSAFE_VALUECLASS) && !opts.compDbgEnC) { setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; varDsc->lvIsUnsafeBuffer = true; } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { makeExtraStructQueries(typeHnd, 2); } #endif // DEBUG } #ifdef DEBUG //------------------------------------------------------------------------ // makeExtraStructQueries: Query the information for the given struct handle. // // Arguments: // structHandle -- The handle for the struct type we're querying. // level -- How many more levels to recurse. // void Compiler::makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level) { if (level <= 0) { return; } assert(structHandle != NO_CLASS_HANDLE); (void)typGetObjLayout(structHandle); DWORD typeFlags = info.compCompHnd->getClassAttribs(structHandle); if (StructHasDontDigFieldsFlagSet(typeFlags)) { // In AOT ReadyToRun compilation, don't query fields of types // outside of the current version bubble. return; } unsigned fieldCnt = info.compCompHnd->getClassNumInstanceFields(structHandle); impNormStructType(structHandle); #ifdef TARGET_ARMARCH GetHfaType(structHandle); #endif for (unsigned int i = 0; i < fieldCnt; i++) { CORINFO_FIELD_HANDLE fieldHandle = info.compCompHnd->getFieldInClass(structHandle, i); unsigned fldOffset = info.compCompHnd->getFieldOffset(fieldHandle); CORINFO_CLASS_HANDLE fieldClassHandle = NO_CLASS_HANDLE; CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHandle, &fieldClassHandle); var_types fieldVarType = JITtype2varType(fieldCorType); if (fieldClassHandle != NO_CLASS_HANDLE) { if (varTypeIsStruct(fieldVarType)) { makeExtraStructQueries(fieldClassHandle, level - 1); } } } } #endif // DEBUG //------------------------------------------------------------------------ // lvaSetStructUsedAsVarArg: update hfa information for vararg struct args // // Arguments: // varNum -- number of the variable // // Notes: // This only affects arm64 varargs on windows where we need to pass // hfa arguments as if they are not HFAs. // // This function should only be called if the struct is used in a varargs // method. void Compiler::lvaSetStructUsedAsVarArg(unsigned varNum) { if (GlobalJitOptions::compFeatureHfa && TargetOS::IsWindows) { #if defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); // For varargs methods incoming and outgoing arguments should not be treated // as HFA. varDsc->SetHfaType(TYP_UNDEF); #endif // defined(TARGET_ARM64) } } //------------------------------------------------------------------------ // lvaSetClass: set class information for a local var. 
// // Arguments: // varNum -- number of the variable // clsHnd -- class handle to use in set or update // isExact -- true if class is known exactly // // Notes: // varNum must not already have a ref class handle. void Compiler::lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact) { noway_assert(varNum < lvaCount); // If we are just importing, we cannot reliably track local ref types, // since the jit maps CORINFO_TYPE_VAR to TYP_REF. if (compIsForImportOnly()) { return; } // Else we should have a type handle. assert(clsHnd != nullptr); LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvType == TYP_REF); // We shoud not have any ref type information for this var. assert(varDsc->lvClassHnd == NO_CLASS_HANDLE); assert(!varDsc->lvClassIsExact); JITDUMP("\nlvaSetClass: setting class for V%02i to (%p) %s %s\n", varNum, dspPtr(clsHnd), info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : ""); varDsc->lvClassHnd = clsHnd; varDsc->lvClassIsExact = isExact; } //------------------------------------------------------------------------ // lvaSetClass: set class information for a local var from a tree or stack type // // Arguments: // varNum -- number of the variable. Must be a single def local // tree -- tree establishing the variable's value // stackHnd -- handle for the type from the evaluation stack // // Notes: // Preferentially uses the tree's type, when available. Since not all // tree kinds can track ref types, the stack type is used as a // fallback. If there is no stack type, then the class is set to object. void Compiler::lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(tree, &isExact, &isNonNull); if (clsHnd != nullptr) { lvaSetClass(varNum, clsHnd, isExact); } else if (stackHnd != nullptr) { lvaSetClass(varNum, stackHnd); } else { lvaSetClass(varNum, impGetObjectClass()); } } //------------------------------------------------------------------------ // lvaUpdateClass: update class information for a local var. // // Arguments: // varNum -- number of the variable // clsHnd -- class handle to use in set or update // isExact -- true if class is known exactly // // Notes: // // This method models the type update rule for an assignment. // // Updates currently should only happen for single-def user args or // locals, when we are processing the expression actually being // used to initialize the local (or inlined arg). The update will // change the local from the declared type to the type of the // initial value. // // These updates should always *improve* what we know about the // type, that is making an inexact type exact, or changing a type // to some subtype. However the jit lacks precise type information // for shared code, so ensuring this is so is currently not // possible. void Compiler::lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact) { assert(varNum < lvaCount); // If we are just importing, we cannot reliably track local ref types, // since the jit maps CORINFO_TYPE_VAR to TYP_REF. if (compIsForImportOnly()) { return; } // Else we should have a class handle to consider assert(clsHnd != nullptr); LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvType == TYP_REF); // We should already have a class assert(varDsc->lvClassHnd != NO_CLASS_HANDLE); // We should only be updating classes for single-def locals. assert(varDsc->lvSingleDef); // Now see if we should update. 
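    // Illustrative example (hypothetical classes, not from the original source): if V05
    // currently carries an inexact 'Animal' handle and the initializing tree proves a
    // 'Dog', the logic below takes the update; it also updates when the same class
    // merely becomes known exactly.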
    //
    // New information may not always be "better" so do some
    // simple analysis to decide if the update is worthwhile.
    const bool isNewClass   = (clsHnd != varDsc->lvClassHnd);
    bool       shouldUpdate = false;

    // Are we attempting to update the class? Only check this when we have
    // a new type and the existing class is inexact... we should not be
    // updating exact classes.
    if (!varDsc->lvClassIsExact && isNewClass)
    {
        shouldUpdate = !!info.compCompHnd->isMoreSpecificType(varDsc->lvClassHnd, clsHnd);
    }
    // Else are we attempting to update exactness?
    else if (isExact && !varDsc->lvClassIsExact && !isNewClass)
    {
        shouldUpdate = true;
    }

#if DEBUG
    if (isNewClass || (isExact != varDsc->lvClassIsExact))
    {
        JITDUMP("\nlvaUpdateClass:%s Updating class for V%02u", shouldUpdate ? "" : " NOT", varNum);
        JITDUMP(" from (%p) %s%s", dspPtr(varDsc->lvClassHnd), info.compCompHnd->getClassName(varDsc->lvClassHnd),
                varDsc->lvClassIsExact ? " [exact]" : "");
        JITDUMP(" to (%p) %s%s\n", dspPtr(clsHnd), info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : "");
    }
#endif // DEBUG

    if (shouldUpdate)
    {
        varDsc->lvClassHnd     = clsHnd;
        varDsc->lvClassIsExact = isExact;

#if DEBUG
        // Note we've modified the type...
        varDsc->lvClassInfoUpdated = true;
#endif // DEBUG
    }

    return;
}

//------------------------------------------------------------------------
// lvaUpdateClass: Update class information for a local var from a tree
//  or stack type
//
// Arguments:
//    varNum -- number of the variable. Must be a single def local
//    tree  -- tree establishing the variable's value
//    stackHnd -- handle for the type from the evaluation stack
//
// Notes:
//    Preferentially uses the tree's type, when available. Since not all
//    tree kinds can track ref types, the stack type is used as a
//    fallback.

void Compiler::lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd)
{
    bool                 isExact   = false;
    bool                 isNonNull = false;
    CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);

    if (clsHnd != nullptr)
    {
        lvaUpdateClass(varNum, clsHnd, isExact);
    }
    else if (stackHnd != nullptr)
    {
        lvaUpdateClass(varNum, stackHnd);
    }
}

//------------------------------------------------------------------------
// lvaLclSize: returns size of a local variable, in bytes
//
// Arguments:
//    varNum -- variable to query
//
// Returns:
//    Number of bytes needed on the frame for such a local.

unsigned Compiler::lvaLclSize(unsigned varNum)
{
    assert(varNum < lvaCount);

    var_types varType = lvaTable[varNum].TypeGet();

    switch (varType)
    {
        case TYP_STRUCT:
        case TYP_BLK:
            return lvaTable[varNum].lvSize();

        case TYP_LCLBLK:
#if FEATURE_FIXED_OUT_ARGS
            // Note that this operation performs a read of a PhasedVar
            noway_assert(varNum == lvaOutgoingArgSpaceVar);
            return lvaOutgoingArgSpaceSize;
#else // FEATURE_FIXED_OUT_ARGS
            assert(!"Unknown size");
            NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS

        default: // This must be a primitive var. Fall out of switch statement
            break;
    }

#ifdef TARGET_64BIT
    // We only need this Quirk for TARGET_64BIT
    if (lvaTable[varNum].lvQuirkToLong)
    {
        noway_assert(lvaTable[varNum].IsAddressExposed());
        return genTypeStSz(TYP_LONG) * sizeof(int); // return 8  (2 * 4)
    }
#endif

    return genTypeStSz(varType) * sizeof(int);
}

//
// Return the exact width of local variable "varNum" -- the number of bytes
// you'd need to copy in order to overwrite the value.
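//
// Illustrative contrast (hypothetical 6-byte struct local, not from the original
// source): lvaLclSize() above reports the rounded-up frame size (8 bytes), while
// lvaLclExactSize() below returns 6.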
// unsigned Compiler::lvaLclExactSize(unsigned varNum) { assert(varNum < lvaCount); var_types varType = lvaTable[varNum].TypeGet(); switch (varType) { case TYP_STRUCT: case TYP_BLK: return lvaTable[varNum].lvExactSize; case TYP_LCLBLK: #if FEATURE_FIXED_OUT_ARGS // Note that this operation performs a read of a PhasedVar noway_assert(lvaOutgoingArgSpaceSize >= 0); noway_assert(varNum == lvaOutgoingArgSpaceVar); return lvaOutgoingArgSpaceSize; #else // FEATURE_FIXED_OUT_ARGS assert(!"Unknown size"); NO_WAY("Target doesn't support TYP_LCLBLK"); #endif // FEATURE_FIXED_OUT_ARGS default: // This must be a primitive var. Fall out of switch statement break; } return genTypeSize(varType); } // getCalledCount -- get the value used to normalized weights for this method // if we don't have profile data then getCalledCount will return BB_UNITY_WEIGHT (100) // otherwise it returns the number of times that profile data says the method was called. // // static weight_t BasicBlock::getCalledCount(Compiler* comp) { // when we don't have profile data then fgCalledCount will be BB_UNITY_WEIGHT (100) weight_t calledCount = comp->fgCalledCount; // If we haven't yet reach the place where we setup fgCalledCount it could still be zero // so return a reasonable value to use until we set it. // if (calledCount == 0) { if (comp->fgIsUsingProfileWeights()) { // When we use profile data block counts we have exact counts, // not multiples of BB_UNITY_WEIGHT (100) calledCount = 1; } else { calledCount = comp->fgFirstBB->bbWeight; if (calledCount == 0) { calledCount = BB_UNITY_WEIGHT; } } } return calledCount; } // getBBWeight -- get the normalized weight of this block weight_t BasicBlock::getBBWeight(Compiler* comp) { if (this->bbWeight == BB_ZERO_WEIGHT) { return BB_ZERO_WEIGHT; } else { weight_t calledCount = getCalledCount(comp); // Normalize the bbWeights by multiplying by BB_UNITY_WEIGHT and dividing by the calledCount. // weight_t fullResult = this->bbWeight * BB_UNITY_WEIGHT / calledCount; return fullResult; } } // LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for small code. class LclVarDsc_SmallCode_Less { const LclVarDsc* m_lvaTable; INDEBUG(unsigned m_lvaCount;) public: LclVarDsc_SmallCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount)) : m_lvaTable(lvaTable) #ifdef DEBUG , m_lvaCount(lvaCount) #endif { } bool operator()(unsigned n1, unsigned n2) { assert(n1 < m_lvaCount); assert(n2 < m_lvaCount); const LclVarDsc* dsc1 = &m_lvaTable[n1]; const LclVarDsc* dsc2 = &m_lvaTable[n2]; // We should not be sorting untracked variables assert(dsc1->lvTracked); assert(dsc2->lvTracked); // We should not be sorting after registers have been allocated assert(!dsc1->lvRegister); assert(!dsc2->lvRegister); unsigned weight1 = dsc1->lvRefCnt(); unsigned weight2 = dsc2->lvRefCnt(); #ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from // legacy backend. It should be enabled and verified. // Force integer candidates to sort above float candidates. const bool isFloat1 = isFloatRegType(dsc1->lvType); const bool isFloat2 = isFloatRegType(dsc2->lvType); if (isFloat1 != isFloat2) { if ((weight2 != 0) && isFloat1) { return false; } if ((weight1 != 0) && isFloat2) { return true; } } #endif if (weight1 != weight2) { return weight1 > weight2; } // If the weighted ref counts are different then use their difference. 
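        // Illustrative example (hypothetical counts, not from the original source): with
        // refCnt 5 vs. 5 the unweighted compare above is a tie, so the weighted counts
        // below decide; only when those also match do the reg-arg and GC-type bonuses
        // further down break the tie.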
if (dsc1->lvRefCntWtd() != dsc2->lvRefCntWtd()) { return dsc1->lvRefCntWtd() > dsc2->lvRefCntWtd(); } // We have equal ref counts and weighted ref counts. // Break the tie by: // - Increasing the weight by 2 if we are a register arg. // - Increasing the weight by 0.5 if we are a GC type. // // Review: seems odd that this is mixing counts and weights. if (weight1 != 0) { if (dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc1->TypeGet())) { weight1 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight2 != 0) { if (dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc2->TypeGet())) { weight2 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight1 != weight2) { return weight1 > weight2; } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; // LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for blended code. class LclVarDsc_BlendedCode_Less { const LclVarDsc* m_lvaTable; INDEBUG(unsigned m_lvaCount;) public: LclVarDsc_BlendedCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount)) : m_lvaTable(lvaTable) #ifdef DEBUG , m_lvaCount(lvaCount) #endif { } bool operator()(unsigned n1, unsigned n2) { assert(n1 < m_lvaCount); assert(n2 < m_lvaCount); const LclVarDsc* dsc1 = &m_lvaTable[n1]; const LclVarDsc* dsc2 = &m_lvaTable[n2]; // We should not be sorting untracked variables assert(dsc1->lvTracked); assert(dsc2->lvTracked); // We should not be sorting after registers have been allocated assert(!dsc1->lvRegister); assert(!dsc2->lvRegister); weight_t weight1 = dsc1->lvRefCntWtd(); weight_t weight2 = dsc2->lvRefCntWtd(); #ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from // legacy backend. It should be enabled and verified. // Force integer candidates to sort above float candidates. const bool isFloat1 = isFloatRegType(dsc1->lvType); const bool isFloat2 = isFloatRegType(dsc2->lvType); if (isFloat1 != isFloat2) { if (!Compiler::fgProfileWeightsEqual(weight2, 0) && isFloat1) { return false; } if (!Compiler::fgProfileWeightsEqual(weight1, 0) && isFloat2) { return true; } } #endif if (!Compiler::fgProfileWeightsEqual(weight1, 0) && dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight2, 0) && dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight1, weight2)) { return weight1 > weight2; } // If the weighted ref counts are different then try the unweighted ref counts. if (dsc1->lvRefCnt() != dsc2->lvRefCnt()) { return dsc1->lvRefCnt() > dsc2->lvRefCnt(); } // If one is a GC type and the other is not the GC type wins. if (varTypeIsGC(dsc1->TypeGet()) != varTypeIsGC(dsc2->TypeGet())) { return varTypeIsGC(dsc1->TypeGet()); } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; /***************************************************************************** * * Sort the local variable table by refcount and assign tracking indices. 
*/ void Compiler::lvaSortByRefCount() { lvaTrackedCount = 0; lvaTrackedCountInSizeTUnits = 0; #ifdef DEBUG VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeEmpty(this)); #endif if (lvaCount == 0) { return; } /* We'll sort the variables by ref count - allocate the sorted table */ if (lvaTrackedToVarNumSize < lvaCount) { lvaTrackedToVarNumSize = lvaCount; lvaTrackedToVarNum = new (getAllocator(CMK_LvaTable)) unsigned[lvaTrackedToVarNumSize]; } unsigned trackedCount = 0; unsigned* tracked = lvaTrackedToVarNum; // Fill in the table used for sorting for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); // Start by assuming that the variable will be tracked. varDsc->lvTracked = 1; if (varDsc->lvRefCnt() == 0) { // Zero ref count, make this untracked. varDsc->lvTracked = 0; varDsc->setLvRefCntWtd(0); } #if !defined(TARGET_64BIT) if (varTypeIsLong(varDsc) && varDsc->lvPromoted) { varDsc->lvTracked = 0; } #endif // !defined(TARGET_64BIT) // Variables that are address-exposed, and all struct locals, are never enregistered, or tracked. // (The struct may be promoted, and its field variables enregistered/tracked, or the VM may "normalize" // its type so that its not seen by the JIT as a struct.) // Pinned variables may not be tracked (a condition of the GCInfo representation) // or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning") // references when using the general GC encoding. if (varDsc->IsAddressExposed()) { varDsc->lvTracked = 0; assert(varDsc->lvType != TYP_STRUCT || varDsc->lvDoNotEnregister); // For structs, should have set this when we set m_addrExposed. } if (varTypeIsStruct(varDsc)) { // Promoted structs will never be considered for enregistration anyway, // and the DoNotEnregister flag was used to indicate whether promotion was // independent or dependent. if (varDsc->lvPromoted) { varDsc->lvTracked = 0; } else if (!varDsc->IsEnregisterableType()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NotRegSizeStruct)); } else if (varDsc->lvType == TYP_STRUCT) { if (!varDsc->lvRegStruct && !compEnregStructLocals()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DontEnregStructs)); } else if (varDsc->lvIsMultiRegArgOrRet()) { // Prolog and return generators do not support SIMD<->general register moves. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg)); } #if defined(TARGET_ARM) else if (varDsc->lvIsParam) { // On arm we prespill all struct args, // TODO-Arm-CQ: keep them in registers, it will need a fix // to "On the ARM we will spill any incoming struct args" logic in codegencommon. 
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg)); } #endif // TARGET_ARM } } if (varDsc->lvIsStructField && (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT)) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DepField)); } if (varDsc->lvPinned) { varDsc->lvTracked = 0; #ifdef JIT32_GCENCODER lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef)); #endif } if (opts.MinOpts() && !JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())) { varDsc->lvTracked = 0; lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::MinOptsGC)); } if (!compEnregLocals()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } #if defined(JIT32_GCENCODER) && defined(FEATURE_EH_FUNCLETS) if (lvaIsOriginalThisArg(lclNum) && (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) { // For x86/Linux, we need to track "this". // However we cannot have it in tracked variables, so we set "this" pointer always untracked varDsc->lvTracked = 0; } #endif // Are we not optimizing and we have exception handlers? // if so mark all args and locals "do not enregister". // if (opts.MinOpts() && compHndBBtabCount > 0) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } else { var_types type = genActualType(varDsc->TypeGet()); switch (type) { case TYP_FLOAT: case TYP_DOUBLE: case TYP_INT: case TYP_LONG: case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD case TYP_STRUCT: break; case TYP_UNDEF: case TYP_UNKNOWN: noway_assert(!"lvType not set correctly"); varDsc->lvType = TYP_INT; FALLTHROUGH; default: varDsc->lvTracked = 0; } } if (varDsc->lvTracked) { tracked[trackedCount++] = lclNum; } } // Now sort the tracked variable table by ref-count if (compCodeOpt() == SMALL_CODE) { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_SmallCode_Less(lvaTable DEBUGARG(lvaCount))); } else { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_BlendedCode_Less(lvaTable DEBUGARG(lvaCount))); } lvaTrackedCount = min((unsigned)JitConfig.JitMaxLocalsToTrack(), trackedCount); JITDUMP("Tracked variable (%u out of %u) table:\n", lvaTrackedCount, lvaCount); // Assign indices to all the variables we've decided to track for (unsigned varIndex = 0; varIndex < lvaTrackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvVarIndex = static_cast<unsigned short>(varIndex); INDEBUG(if (verbose) { gtDispLclVar(tracked[varIndex]); }) JITDUMP(" [%6s]: refCnt = %4u, refCntWtd = %6s\n", varTypeName(varDsc->TypeGet()), varDsc->lvRefCnt(), refCntWtd2str(varDsc->lvRefCntWtd())); } JITDUMP("\n"); // Mark all variables past the first 'lclMAX_TRACKED' as untracked for (unsigned varIndex = lvaTrackedCount; varIndex < trackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvTracked = 0; } // We have a new epoch, and also cache the tracked var count in terms of size_t's sufficient to hold that many bits. 
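    // Illustrative arithmetic (not from the original source): size_t is 64 bits on a
    // 64-bit host, so lvaTrackedCount == 70 rounds up to 128 bits and yields
    // lvaTrackedCountInSizeTUnits == 2.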
lvaCurEpoch++; lvaTrackedCountInSizeTUnits = roundUp((unsigned)lvaTrackedCount, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8); #ifdef DEBUG VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeFull(this)); #endif } /***************************************************************************** * * This is called by lvaMarkLclRefs to disqualify a variable from being * considered by optAddCopies() */ void LclVarDsc::lvaDisqualifyVar() { this->lvDisqualify = true; this->lvSingleDef = false; this->lvDefStmt = nullptr; } #ifdef FEATURE_SIMD var_types LclVarDsc::GetSimdBaseType() const { CorInfoType simdBaseJitType = GetSimdBaseJitType(); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { return TYP_UNKNOWN; } return JitType2PreciseVarType(simdBaseJitType); } #endif // FEATURE_SIMD unsigned LclVarDsc::lvSize() const // Size needed for storage representation. Only used for structs or TYP_BLK. { // TODO-Review: Sometimes we get called on ARM with HFA struct variables that have been promoted, // where the struct itself is no longer used because all access is via its member fields. // When that happens, the struct is marked as unused and its type has been changed to // TYP_INT (to keep the GC tracking code from looking at it). // See Compiler::raAssignVars() for details. For example: // N002 ( 4, 3) [00EA067C] ------------- return struct $346 // N001 ( 3, 2) [00EA0628] ------------- lclVar struct(U) V03 loc2 // float V03.f1 (offs=0x00) -> V12 tmp7 // f8 (last use) (last use) $345 // Here, the "struct(U)" shows that the "V03 loc2" variable is unused. Not shown is that V03 // is now TYP_INT in the local variable table. It's not really unused, because it's in the tree. assert(varTypeIsStruct(lvType) || (lvType == TYP_BLK) || (lvPromoted && lvUnusedStruct)); if (lvIsParam) { assert(varTypeIsStruct(lvType)); const bool isFloatHfa = (lvIsHfa() && (GetHfaType() == TYP_FLOAT)); const unsigned argSizeAlignment = Compiler::eeGetArgSizeAlignment(lvType, isFloatHfa); return roundUp(lvExactSize, argSizeAlignment); } #if defined(FEATURE_SIMD) && !defined(TARGET_64BIT) // For 32-bit architectures, we make local variable SIMD12 types 16 bytes instead of just 12. We can't do // this for arguments, which must be passed according the defined ABI. We don't want to do this for // dependently promoted struct fields, but we don't know that here. See lvaMapSimd12ToSimd16(). // (Note that for 64-bits, we are already rounding up to 16.) if (lvType == TYP_SIMD12) { assert(!lvIsParam); assert(lvExactSize == 12); return 16; } #endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT) return roundUp(lvExactSize, TARGET_POINTER_SIZE); } /********************************************************************************** * Get stack size of the varDsc. */ size_t LclVarDsc::lvArgStackSize() const { // Make sure this will have a stack size assert(!this->lvIsRegArg); size_t stackSize = 0; if (varTypeIsStruct(this)) { #if defined(WINDOWS_AMD64_ABI) // Structs are either passed by reference or can be passed by value using one pointer stackSize = TARGET_POINTER_SIZE; #elif defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // lvSize performs a roundup. stackSize = this->lvSize(); #if defined(TARGET_ARM64) if ((stackSize > TARGET_POINTER_SIZE * 2) && (!this->lvIsHfa())) { // If the size is greater than 16 bytes then it will // be passed by reference. 
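            // Illustrative example (not from the original source): a 24-byte non-HFA
            // struct on arm64 is passed by reference, so its stack size here is one
            // pointer slot rather than the 24 bytes lvSize() would report.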
stackSize = TARGET_POINTER_SIZE; } #endif // defined(TARGET_ARM64) #else // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI NYI("Unsupported target."); unreached(); #endif // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI } else { stackSize = TARGET_POINTER_SIZE; } return stackSize; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Arguments: // tree - node that uses the local, its type is checked first. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType(const GenTreeLclVarCommon* tree) const { var_types targetType = tree->gtType; var_types lclVarType = TypeGet(); if (targetType == TYP_STRUCT) { if (lclVarType == TYP_STRUCT) { assert(!tree->OperIsLocalField() && "do not expect struct local fields."); lclVarType = GetLayout()->GetRegisterType(); } targetType = lclVarType; } #ifdef DEBUG if ((targetType != TYP_UNDEF) && tree->OperIs(GT_STORE_LCL_VAR) && lvNormalizeOnStore()) { const bool phiStore = (tree->gtGetOp1()->OperIsNonPhiLocal() == false); // Ensure that the lclVar node is typed correctly, // does not apply to phi-stores because they do not produce code in the merge block. assert(phiStore || targetType == genActualType(lclVarType)); } #endif return targetType; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType() const { if (TypeGet() != TYP_STRUCT) { #if !defined(TARGET_64BIT) if (TypeGet() == TYP_LONG) { return TYP_UNDEF; } #endif return TypeGet(); } assert(m_layout != nullptr); return m_layout->GetRegisterType(); } //------------------------------------------------------------------------ // GetActualRegisterType: Determine an actual register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetActualRegisterType() const { return genActualType(GetRegisterType()); } //---------------------------------------------------------------------------------------------- // CanBeReplacedWithItsField: check if a whole struct reference could be replaced by a field. // // Arguments: // comp - the compiler instance; // // Return Value: // true if that can be replaced, false otherwise. // // Notes: // The replacement can be made only for independently promoted structs // with 1 field without holes. // bool LclVarDsc::CanBeReplacedWithItsField(Compiler* comp) const { if (!lvPromoted) { return false; } if (comp->lvaGetPromotionType(this) != Compiler::PROMOTION_TYPE_INDEPENDENT) { return false; } if (lvFieldCnt != 1) { return false; } if (lvContainsHoles) { return false; } #if defined(FEATURE_SIMD) // If we return `struct A { SIMD16 a; }` we split the struct into several fields. // In order to do that we have to have its field `a` in memory. Right now lowering cannot // handle RETURN struct(multiple registers)->SIMD16(one register), but it can be improved. 
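    // Illustrative example (not from the original source): struct A { SIMD16 a; } is
    // independently promoted with one field and no holes, yet the SIMD check below
    // still blocks replacing the whole-struct reference with its field.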
    LclVarDsc* fieldDsc = comp->lvaGetDesc(lvFieldLclStart);
    if (varTypeIsSIMD(fieldDsc))
    {
        return false;
    }
#endif // FEATURE_SIMD

    return true;
}

//------------------------------------------------------------------------
// lvaMarkLclRefs: increment local var references counts and more
//
// Arguments:
//     tree - some node in a tree
//     block - block that the tree node belongs to
//     stmt - stmt that the tree node belongs to
//     isRecompute - true if we should just recompute counts
//
// Notes:
//     Invoked via the MarkLocalVarsVisitor
//
//     Primarily increments the regular and weighted local var ref
//     counts for any local referred to directly by tree.
//
//     Also:
//
//     Accounts for implicit references to frame list root for
//     pinvokes that will be expanded later.
//
//     Determines if locals of TYP_BOOL can safely be considered
//     to hold only 0 or 1 or may have a broader range of true values.
//
//     Does some setup work for assertion prop, noting locals that are
//     eligible for assertion prop, single defs, and tracking which blocks
//     hold uses.
//
//     Looks for uses of generic context and sets lvaGenericsContextInUse.
//
//     In checked builds:
//
//     Verifies that local accesses are consistently typed.
//     Verifies that casts remain in bounds.

void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute)
{
    const weight_t weight = block->getBBWeight(this);

    /* Is this a call to unmanaged code ? */
    if (tree->IsCall() && compMethodRequiresPInvokeFrame())
    {
        assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
        if (!opts.ShouldUsePInvokeHelpers())
        {
            /* Get the special variable descriptor */
            LclVarDsc* varDsc = lvaGetDesc(info.compLvFrameListRoot);

            /* Increment the ref counts twice */
            varDsc->incRefCnts(weight, this);
            varDsc->incRefCnts(weight, this);
        }
    }

    if (!isRecompute)
    {
        /* Is this an assignment? */
        if (tree->OperIs(GT_ASG))
        {
            GenTree* op1 = tree->AsOp()->gtOp1;
            GenTree* op2 = tree->AsOp()->gtOp2;

            /* Is this an assignment to a local variable? */
            if (op1->gtOper == GT_LCL_VAR && op2->gtType != TYP_BOOL)
            {
                /* Only simple assignments allowed for booleans */
                if (tree->gtOper != GT_ASG)
                {
                    goto NOT_BOOL;
                }

                /* Is the RHS clearly a boolean value? */
                switch (op2->gtOper)
                {
                    unsigned lclNum;

                    case GT_CNS_INT:

                        if (op2->AsIntCon()->gtIconVal == 0)
                        {
                            break;
                        }
                        if (op2->AsIntCon()->gtIconVal == 1)
                        {
                            break;
                        }

                        // Not 0 or 1, fall through ....
                        FALLTHROUGH;

                    default:

                        if (op2->OperIsCompare())
                        {
                            break;
                        }

                    NOT_BOOL:

                        lclNum = op1->AsLclVarCommon()->GetLclNum();
                        noway_assert(lclNum < lvaCount);

                        lvaTable[lclNum].lvIsBoolean = false;
                        break;
                }
            }
        }
    }

    if (tree->OperIsLocalAddr())
    {
        LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
        assert(varDsc->IsAddressExposed());
        varDsc->incRefCnts(weight, this);
        return;
    }

    if ((tree->gtOper != GT_LCL_VAR) && (tree->gtOper != GT_LCL_FLD))
    {
        return;
    }

    /* This must be a local variable reference */

    // See if this is a generics context use.
    if ((tree->gtFlags & GTF_VAR_CONTEXT) != 0)
    {
        assert(tree->OperIs(GT_LCL_VAR));
        if (!lvaGenericsContextInUse)
        {
            JITDUMP("-- generic context in use at [%06u]\n", dspTreeID(tree));
            lvaGenericsContextInUse = true;
        }
    }

    assert((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD));
    unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    /* Increment the reference counts */

    varDsc->incRefCnts(weight, this);

#ifdef DEBUG
    if (varDsc->lvIsStructField)
    {
        // If ref count was increased for struct field, ensure that the
        // parent struct is still promoted.
        LclVarDsc* parentStruct = lvaGetDesc(varDsc->lvParentLcl);
        assert(!parentStruct->lvUndoneStructPromotion);
    }
#endif

    if (!isRecompute)
    {
        if (lvaVarAddrExposed(lclNum))
        {
            varDsc->lvIsBoolean = false;
        }

        if (tree->gtOper == GT_LCL_FLD)
        {
            // variables that have uses inside a GT_LCL_FLD
            // cause problems, so we will disqualify them here
            varDsc->lvaDisqualifyVar();
            return;
        }

        if (fgDomsComputed && IsDominatedByExceptionalEntry(block))
        {
            SetVolatileHint(varDsc);
        }

        /* Record if the variable has a single def or not */

        if (!varDsc->lvDisqualify) // If this variable is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                /*
                   If we have one of these cases:
                       1.    We have already seen a definition (i.e lvSingleDef is true)
                       2. or info.CompInitMem is true (thus this would be the second definition)
                       3. or we have an assignment inside QMARK-COLON trees
                       4. or we have an update form of assignment (i.e. +=, -=, *=)
                   Then we must disqualify this variable for use in optAddCopies()

                   Note that all parameters start out with lvSingleDef set to true
                */
                if ((varDsc->lvSingleDef == true) || (info.compInitMem == true) || (tree->gtFlags & GTF_COLON_COND) ||
                    (tree->gtFlags & GTF_VAR_USEASG))
                {
                    varDsc->lvaDisqualifyVar();
                }
                else
                {
                    varDsc->lvSingleDef = true;
                    varDsc->lvDefStmt   = stmt;
                }
            }
            else // otherwise this is a ref of our variable
            {
                if (BlockSetOps::MayBeUninit(varDsc->lvRefBlks))
                {
                    // Lazy initialization
                    BlockSetOps::AssignNoCopy(this, varDsc->lvRefBlks, BlockSetOps::MakeEmpty(this));
                }

                BlockSetOps::AddElemD(this, varDsc->lvRefBlks, block->bbNum);
            }
        }

        if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                bool bbInALoop  = (block->bbFlags & BBF_BACKWARD_JUMP) != 0;
                bool bbIsReturn = block->bbJumpKind == BBJ_RETURN;
                // TODO: Zero-inits in LSRA are created with the below condition. But if we filter based on that
                // condition, we filter out a lot of interesting variables that would otherwise benefit from
                // EH var enregistration.
                // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem ||
                // varTypeIsGC(varDsc->TypeGet()));
                bool needsExplicitZeroInit = fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn);

                if (varDsc->lvSingleDefRegCandidate || needsExplicitZeroInit)
                {
#ifdef DEBUG
                    if (needsExplicitZeroInit)
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'Z';
                        JITDUMP("V%02u needs explicit zero init. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }
                    else
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'M';
                        JITDUMP("V%02u has multiple definitions. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }

#endif // DEBUG
                    varDsc->lvSingleDefRegCandidate           = false;
                    varDsc->lvDisqualifySingleDefRegCandidate = true;
                }
                else
                {
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
                    // TODO-CQ: If the varType needs partial callee save, conservatively do not enregister
                    // such variables. In the future, we need to enable enregistration for such variables.
                    if (!varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
#endif
                    {
                        varDsc->lvSingleDefRegCandidate = true;
                        JITDUMP("Marking EH Var V%02u as a register candidate.\n", lclNum);
                    }
                }
            }
        }

        bool allowStructs = false;
#ifdef UNIX_AMD64_ABI
        // On System V the type of the var could be a struct type.
        allowStructs = varTypeIsStruct(varDsc);
#endif // UNIX_AMD64_ABI

        /* Variables must be used as the same type throughout the method */
        noway_assert(varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN || allowStructs ||
                     genActualType(varDsc->TypeGet()) == genActualType(tree->gtType) ||
                     (tree->gtType == TYP_BYREF && varDsc->TypeGet() == TYP_I_IMPL) ||
                     (tree->gtType == TYP_I_IMPL && varDsc->TypeGet() == TYP_BYREF) || (tree->gtFlags & GTF_VAR_CAST) ||
                     (varTypeIsFloating(varDsc) && varTypeIsFloating(tree)) ||
                     (varTypeIsStruct(varDsc) == varTypeIsStruct(tree)));

        /* Remember the type of the reference */

        if (tree->gtType == TYP_UNKNOWN || varDsc->lvType == TYP_UNDEF)
        {
            varDsc->lvType = tree->gtType;
            noway_assert(genActualType(varDsc->TypeGet()) == tree->gtType); // no truncation
        }

#ifdef DEBUG
        if (tree->gtFlags & GTF_VAR_CAST)
        {
            // it should never be bigger than the variable slot

            // Trees don't store the full information about structs
            // so we can't check them.
            if (tree->TypeGet() != TYP_STRUCT)
            {
                unsigned treeSize = genTypeSize(tree->TypeGet());
                unsigned varSize  = genTypeSize(varDsc->TypeGet());
                if (varDsc->TypeGet() == TYP_STRUCT)
                {
                    varSize = varDsc->lvSize();
                }

                assert(treeSize <= varSize);
            }
        }
#endif
    }
}

//------------------------------------------------------------------------
// IsDominatedByExceptionalEntry: Check if the block is dominated by an exception entry block.
//
// Arguments:
//    block - the block to check.
//
bool Compiler::IsDominatedByExceptionalEntry(BasicBlock* block)
{
    assert(fgDomsComputed);
    return block->IsDominatedByExceptionalEntryFlag();
}

//------------------------------------------------------------------------
// SetVolatileHint: Set a local var's volatile hint.
//
// Arguments:
//    varDsc - the local variable that needs the hint.
//
void Compiler::SetVolatileHint(LclVarDsc* varDsc)
{
    varDsc->lvVolatileHint = true;
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: update local var ref counts for IR in a basic block
//
// Arguments:
//    block - the block in question
//    isRecompute - true if counts are being recomputed
//
// Notes:
//    Invokes lvaMarkLclRefs on each tree node for each
//    statement in the block.
//
void Compiler::lvaMarkLocalVars(BasicBlock* block, bool isRecompute)
{
    class MarkLocalVarsVisitor final : public GenTreeVisitor<MarkLocalVarsVisitor>
    {
    private:
        BasicBlock* m_block;
        Statement*  m_stmt;
        bool        m_isRecompute;

    public:
        enum
        {
            DoPreOrder = true,
        };

        MarkLocalVarsVisitor(Compiler* compiler, BasicBlock* block, Statement* stmt, bool isRecompute)
            : GenTreeVisitor<MarkLocalVarsVisitor>(compiler), m_block(block), m_stmt(stmt), m_isRecompute(isRecompute)
        {
        }

        Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
        {
            // TODO: Stop passing isRecompute once we are sure that this assert is never hit.
            assert(!m_isRecompute);
            m_compiler->lvaMarkLclRefs(*use, m_block, m_stmt, m_isRecompute);
            return WALK_CONTINUE;
        }
    };

    JITDUMP("\n*** %s local variables in block " FMT_BB " (weight=%s)\n", isRecompute ? "recomputing" : "marking",
            block->bbNum, refCntWtd2str(block->getBBWeight(this)));

    for (Statement* const stmt : block->NonPhiStatements())
    {
        MarkLocalVarsVisitor visitor(this, block, stmt, isRecompute);
        DISPSTMT(stmt);
        visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
    }
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: enable normal ref counting, compute initial counts, sort locals table
//
// Notes:
//    Now behaves differently in minopts / debug.
Instead of actually inspecting // the IR and counting references, the jit assumes all locals are referenced // and does not sort the locals table. // // Also, when optimizing, lays the groundwork for assertion prop and more. // See details in lvaMarkLclRefs. void Compiler::lvaMarkLocalVars() { JITDUMP("\n*************** In lvaMarkLocalVars()"); // If we have direct pinvokes, verify the frame list root local was set up properly if (compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) { noway_assert(info.compLvFrameListRoot >= info.compLocalsCount && info.compLvFrameListRoot < lvaCount); } } #if !defined(FEATURE_EH_FUNCLETS) // Grab space for exception handling if (ehNeedsShadowSPslots()) { // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) // ie. the offset of the end-of-last-executed-filter unsigned slotsNeeded = 1; unsigned handlerNestingLevel = ehMaxHndNestingCount; if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL)) handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL; slotsNeeded += handlerNestingLevel; // For a filter (which can be active at the same time as a catch/finally handler) slotsNeeded++; // For zero-termination of the shadow-Stack-pointer chain slotsNeeded++; lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar")); LclVarDsc* shadowSPslotsVar = lvaGetDesc(lvaShadowSPslotsVar); shadowSPslotsVar->lvType = TYP_BLK; shadowSPslotsVar->lvExactSize = (slotsNeeded * TARGET_POINTER_SIZE); } #endif // !FEATURE_EH_FUNCLETS // PSPSym and LocAllocSPvar are not used by the CoreRT ABI if (!IsTargetAbi(CORINFO_CORERT_ABI)) { #if defined(FEATURE_EH_FUNCLETS) if (ehNeedsPSPSym()) { lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); LclVarDsc* lclPSPSym = lvaGetDesc(lvaPSPSym); lclPSPSym->lvType = TYP_I_IMPL; lvaSetVarDoNotEnregister(lvaPSPSym DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER // LocAllocSPvar is only required by the implicit frame layout expected by the VM on x86. Whether // a function contains a Localloc is conveyed in the GC information, in the InfoHdrSmall.localloc // field. The function must have an EBP frame. Then, the VM finds the LocAllocSP slot by assuming // the following stack layout: // // -- higher addresses -- // saved EBP <-- EBP points here // other callee-saved registers // InfoHdrSmall.savedRegsCountExclFP specifies this size // optional GS cookie // InfoHdrSmall.security is 1 if this exists // LocAllocSP slot // -- lower addresses -- // // See also eetwain.cpp::GetLocallocSPOffset() and its callers. if (compLocallocUsed) { lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar")); LclVarDsc* locAllocSPvar = lvaGetDesc(lvaLocAllocSPvar); locAllocSPvar->lvType = TYP_I_IMPL; } #endif // JIT32_GCENCODER } // Ref counting is now enabled normally. lvaRefCountState = RCS_NORMAL; #if defined(DEBUG) const bool setSlotNumbers = true; #else const bool setSlotNumbers = opts.compScopeInfo && (info.compVarScopesCount > 0); #endif // defined(DEBUG) const bool isRecompute = false; lvaComputeRefCounts(isRecompute, setSlotNumbers); // If we don't need precise reference counts, e.g. we're not optimizing, we're done. if (!PreciseRefCountsRequired()) { return; } const bool reportParamTypeArg = lvaReportParamTypeArg(); // Update bookkeeping on the generic context. 
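    // When 'this' is kept alive and reported, the generic context lives in the 'this'
    // pointer (local 0); otherwise it lives in the type context arg, if one is reported.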
if (lvaKeepAliveAndReportThis()) { lvaGetDesc(0u)->lvImplicitlyReferenced = reportParamTypeArg; } else if (lvaReportParamTypeArg()) { // We should have a context arg. assert(info.compTypeCtxtArg != (int)BAD_VAR_NUM); lvaGetDesc(info.compTypeCtxtArg)->lvImplicitlyReferenced = reportParamTypeArg; } assert(PreciseRefCountsRequired()); // Note: optAddCopies() depends on lvaRefBlks, which is set in lvaMarkLocalVars(BasicBlock*), called above. optAddCopies(); } //------------------------------------------------------------------------ // lvaComputeRefCounts: compute ref counts for locals // // Arguments: // isRecompute -- true if we just want ref counts and no other side effects; // false means to also look for true boolean locals, lay // groundwork for assertion prop, check type consistency, etc. // See lvaMarkLclRefs for details on what else goes on. // setSlotNumbers -- true if local slot numbers should be assigned. // // Notes: // Some implicit references are given actual counts or weight bumps here // to match pre-existing behavior. // // In fast-jitting modes where we don't ref count locals, this bypasses // actual counting, and makes all locals implicitly referenced on first // compute. It asserts all locals are implicitly referenced on recompute. // // When optimizing we also recompute lvaGenericsContextInUse based // on specially flagged LCL_VAR appearances. // void Compiler::lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers) { JITDUMP("\n*** lvaComputeRefCounts ***\n"); unsigned lclNum = 0; LclVarDsc* varDsc = nullptr; // Fast path for minopts and debug codegen. // // On first compute: mark all locals as implicitly referenced and untracked. // On recompute: do nothing. if (!PreciseRefCountsRequired()) { if (isRecompute) { #if defined(DEBUG) // All local vars should be marked as implicitly referenced // and not tracked. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (isSpecialVarargsParam) { assert(varDsc->lvRefCnt() == 0); } else { assert(varDsc->lvImplicitlyReferenced); } assert(!varDsc->lvTracked); } #endif // defined (DEBUG) return; } // First compute. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { // Using lvImplicitlyReferenced here ensures that we can't // accidentally make locals be unreferenced later by decrementing // the ref count to zero. // // If, in minopts/debug, we really want to allow locals to become // unreferenced later, we'll have to explicitly clear this bit. varDsc->setLvRefCnt(0); varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT); // Special case for some varargs params ... these must // remain unreferenced. const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (!isSpecialVarargsParam) { varDsc->lvImplicitlyReferenced = 1; } varDsc->lvTracked = 0; if (setSlotNumbers) { varDsc->lvSlotNum = lclNum; } // Assert that it's ok to bypass the type repair logic in lvaMarkLclRefs assert((varDsc->lvType != TYP_UNDEF) && (varDsc->lvType != TYP_VOID) && (varDsc->lvType != TYP_UNKNOWN)); } lvaCurEpoch++; lvaTrackedCount = 0; lvaTrackedCountInSizeTUnits = 0; return; } // Slower path we take when optimizing, to get accurate counts. // // First, reset all explicit ref counts and weights. 
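    // Note that lvImplicitlyReferenced is deliberately left untouched here; only the
    // explicit counts and weights recomputed below are cleared.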
    for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
    {
        varDsc->setLvRefCnt(0);
        varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT);

        if (setSlotNumbers)
        {
            varDsc->lvSlotNum = lclNum;
        }

        // Set initial value for lvSingleDef for explicit and implicit
        // argument locals as they are "defined" on entry.
        // However, if we are just recomputing the ref counts, retain the value
        // that was set by past phases.
        if (!isRecompute)
        {
            varDsc->lvSingleDef             = varDsc->lvIsParam;
            varDsc->lvSingleDefRegCandidate = varDsc->lvIsParam;
        }
    }

    // Remember current state of generic context use, and prepare
    // to compute new state.
    const bool oldLvaGenericsContextInUse = lvaGenericsContextInUse;
    lvaGenericsContextInUse               = false;

    JITDUMP("\n*** lvaComputeRefCounts -- explicit counts ***\n");

    // Second, account for all explicit local variable references
    for (BasicBlock* const block : Blocks())
    {
        if (block->IsLIR())
        {
            assert(isRecompute);

            const weight_t weight = block->getBBWeight(this);
            for (GenTree* node : LIR::AsRange(block))
            {
                switch (node->OperGet())
                {
                    case GT_LCL_VAR:
                    case GT_LCL_FLD:
                    case GT_LCL_VAR_ADDR:
                    case GT_LCL_FLD_ADDR:
                    case GT_STORE_LCL_VAR:
                    case GT_STORE_LCL_FLD:
                    {
                        LclVarDsc* varDsc = lvaGetDesc(node->AsLclVarCommon());
                        // If this is an EH var, use a zero weight for defs, so that we don't
                        // count those in our heuristic for register allocation, since they always
                        // must be stored, so there's no value in enregistering them at defs; only
                        // if there are enough uses to justify it.
                        if (varDsc->lvLiveInOutOfHndlr && !varDsc->lvDoNotEnregister &&
                            ((node->gtFlags & GTF_VAR_DEF) != 0))
                        {
                            varDsc->incRefCnts(0, this);
                        }
                        else
                        {
                            varDsc->incRefCnts(weight, this);
                        }

                        if ((node->gtFlags & GTF_VAR_CONTEXT) != 0)
                        {
                            assert(node->OperIs(GT_LCL_VAR));
                            lvaGenericsContextInUse = true;
                        }
                        break;
                    }

                    default:
                        break;
                }
            }
        }
        else
        {
            lvaMarkLocalVars(block, isRecompute);
        }
    }

    if (oldLvaGenericsContextInUse && !lvaGenericsContextInUse)
    {
        // Context was in use but no longer is. This can happen
        // if we're able to optimize, so just leave a note.
        JITDUMP("\n** Generics context no longer in use\n");
    }
    else if (lvaGenericsContextInUse && !oldLvaGenericsContextInUse)
    {
        // Context was not in use but now is.
        //
        // Changing from unused->used should never happen; creation of any new IR
        // for context use should also be setting lvaGenericsContextInUse.
        assert(!"unexpected new use of generics context");
    }

    JITDUMP("\n*** lvaComputeRefCounts -- implicit counts ***\n");

    // Third, bump ref counts for some implicit prolog references
    for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
    {
        // Todo: review justification for these count bumps.
        if (varDsc->lvIsRegArg)
        {
            if ((lclNum < info.compArgsCount) && (varDsc->lvRefCnt() > 0))
            {
                // Fix 388376 ARM JitStress WP7
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
            }

            // Ref count bump that was in lvaPromoteStructVar
            //
            // This was formerly done during RCS_EARLY counting,
            // and we did not previously reset counts like we do now.
            if (varDsc->lvIsStructField)
            {
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
            }
        }

        // If we have JMP, all arguments must have a location
        // even if we don't use them inside the method
        if (compJmpOpUsed && varDsc->lvIsParam && (varDsc->lvRefCnt() == 0))
        {
            // except when we have varargs and the argument is
            // passed on the stack.  In that case, it's important
            // for the ref count to be zero, so that we don't attempt
            // to track them for GC info (which is not possible since we
            // don't know their offset in the stack).
See the assert at the
            // end of raMarkStkVars and bug #28949 for more info.
            if (!raIsVarargsStackArg(lclNum))
            {
                varDsc->lvImplicitlyReferenced = 1;
            }
        }
    }
}

void Compiler::lvaAllocOutgoingArgSpaceVar()
{
#if FEATURE_FIXED_OUT_ARGS

    // Setup the outgoing argument region, in case we end up using it later

    if (lvaOutgoingArgSpaceVar == BAD_VAR_NUM)
    {
        lvaOutgoingArgSpaceVar = lvaGrabTemp(false DEBUGARG("OutgoingArgSpace"));

        lvaTable[lvaOutgoingArgSpaceVar].lvType                 = TYP_LCLBLK;
        lvaTable[lvaOutgoingArgSpaceVar].lvImplicitlyReferenced = 1;
    }

    noway_assert(lvaOutgoingArgSpaceVar >= info.compLocalsCount && lvaOutgoingArgSpaceVar < lvaCount);

#endif // FEATURE_FIXED_OUT_ARGS
}

inline void Compiler::lvaIncrementFrameSize(unsigned size)
{
    if (size > MAX_FrameSize || compLclFrameSize + size > MAX_FrameSize)
    {
        BADCODE("Frame size overflow");
    }

    compLclFrameSize += size;
}

/****************************************************************************
*
*  Return true if absolute offsets of temps are larger than vars, or in other
*  words, did we allocate temps before or after vars.  The /GS buffer overrun
*  checks want temps to be at lower stack addresses than buffers
*/
bool Compiler::lvaTempsHaveLargerOffsetThanVars()
{
#ifdef TARGET_ARM
    // We never want to place the temps with larger offsets for ARM
    return false;
#else
    if (compGSReorderStackLayout)
    {
        return codeGen->isFramePointerUsed();
    }
    else
    {
        return true;
    }
#endif
}

/****************************************************************************
*
*  Return an upper bound estimate for the size of the compiler spill temps
*
*/
unsigned Compiler::lvaGetMaxSpillTempSize()
{
    unsigned result = 0;

    if (codeGen->regSet.hasComputedTmpSize())
    {
        result = codeGen->regSet.tmpGetTotalSize();
    }
    else
    {
        result = MAX_SPILL_TEMP_SIZE;
    }

    return result;
}

// clang-format off
/*****************************************************************************
 *
 *  Compute stack frame offsets for arguments, locals and optionally temps.
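 *
 *  In all of the diagrams below, higher addresses are at the top and the stack
 *  grows downward, as indicated by the arrows.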
* * The frame is laid out as follows for x86: * * ESP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * |Callee saved registers | * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| <---- Ambient ESP * | Arguments for the | * ~ next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * EBP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * | incoming EBP | * |-----------------------| <---- EBP * |Callee saved registers | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | Last-executed-filter | * |-----------------------| * | | * ~ Shadow SPs ~ * | | * |-----------------------| * | | * ~ Variables ~ * | | * ~-----------------------| * | Temps | * |-----------------------| * | localloc | * |-----------------------| <---- Ambient ESP * | Arguments for the | * | next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * The frame is laid out as follows for x64: * * RSP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | <---- this padding (0 or 8 bytes) is to ensure flt registers are saved at a mem location aligned at 16-bytes * | | so that we can save 128-bit callee saved xmm regs using performant "movaps" instruction instead of "movups" * ------------------------- * | Callee saved Flt regs | <----- entire 128-bits of callee saved xmm registers are stored here * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP * | | | * ~ | Stack grows ~ * | | downward | * V * * * RBP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | * ------------------------- * | Callee saved Flt regs | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | | * | | * ~ Variables ~ * | | * | | * |-----------------------| * | Temps | * |-----------------------| * | | * ~ localloc ~ // not in frames with EH * | | * |-----------------------| * | PSPSym | // only in frames with EH (thus no localloc) * | | * |-----------------------| <---- RBP in localloc frames (max 240 bytes from Initial-SP) * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP (before localloc, this is Initial-SP) * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM (this is a general picture; details may differ for different conditions): * * SP frames * | | * 
|-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP / R11 frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which means FP-based frames * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | localloc | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM64 (this is a general picture; details may differ for different conditions): * NOTE: SP must be 16-byte aligned, so there may be alignment slots in the frame. * We will often save and establish a frame pointer to create better ETW stack walks. 
* * SP frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames where FP/LR are stored at the top of the frame (frames requiring GS that have localloc) * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * Doing this all in one pass is 'hard'. So instead we do it in 2 basic passes: * 1. Assign all the offsets relative to the Virtual '0'. 
Offsets above (the
 *     incoming arguments) are positive. Offsets below (everything else) are
 *     negative. This pass also calculates the total frame size (between Caller's
 *     SP/return address and the Ambient SP).
 *  2. Figure out where to place the frame pointer, and then adjust the offsets
 *     as needed for the final stack size and whether the offset is frame pointer
 *     relative or stack pointer relative.
 *
 */
// clang-format on

void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
{
    noway_assert((lvaDoneFrameLayout < curState) || (curState == REGALLOC_FRAME_LAYOUT));

    lvaDoneFrameLayout = curState;

#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In lvaAssignFrameOffsets");
        if (curState == INITIAL_FRAME_LAYOUT)
        {
            printf("(INITIAL_FRAME_LAYOUT)");
        }
        else if (curState == PRE_REGALLOC_FRAME_LAYOUT)
        {
            printf("(PRE_REGALLOC_FRAME_LAYOUT)");
        }
        else if (curState == REGALLOC_FRAME_LAYOUT)
        {
            printf("(REGALLOC_FRAME_LAYOUT)");
        }
        else if (curState == TENTATIVE_FRAME_LAYOUT)
        {
            printf("(TENTATIVE_FRAME_LAYOUT)");
        }
        else if (curState == FINAL_FRAME_LAYOUT)
        {
            printf("(FINAL_FRAME_LAYOUT)");
        }
        else
        {
            printf("(UNKNOWN)");
            unreached();
        }
        printf("\n");
    }
#endif

#if FEATURE_FIXED_OUT_ARGS
    assert(lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
#endif // FEATURE_FIXED_OUT_ARGS

    /*-------------------------------------------------------------------------
     *
     * First process the arguments.
     *
     *-------------------------------------------------------------------------
     */

    lvaAssignVirtualFrameOffsetsToArgs();

    /*-------------------------------------------------------------------------
     *
     * Now compute stack offsets for any variables that don't live in registers
     *
     *-------------------------------------------------------------------------
     */

    lvaAssignVirtualFrameOffsetsToLocals();

    lvaAlignFrame();

    /*-------------------------------------------------------------------------
     *
     * Now patch the offsets
     *
     *-------------------------------------------------------------------------
     */

    lvaFixVirtualFrameOffsets();

    // Modify the stack offset for fields of promoted structs.
    lvaAssignFrameOffsetsToPromotedStructs();

    /*-------------------------------------------------------------------------
     *
     * Finalize
     *
     *-------------------------------------------------------------------------
     */

    // If it's not the final frame layout, then it's just an estimate. This means
    // we're allowed to once again write to these variables, even if we've read
    // from them to make tentative code generation or frame layout decisions.
    if (curState < FINAL_FRAME_LAYOUT)
    {
        codeGen->resetFramePointerUsedWritePhase();
    }
}

/*****************************************************************************
 *  lvaFixVirtualFrameOffsets() : Now that everything has a virtual offset,
 *  determine the final value for the frame pointer (if needed) and then
 *  adjust all the offsets appropriately.
 *
 *  This routine fixes each virtual offset to be relative to the frame pointer or SP
 *  based on whether varDsc->lvFramePointerBased is true or false respectively.
 */
void Compiler::lvaFixVirtualFrameOffsets()
{
    LclVarDsc* varDsc;

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64)
    if (lvaPSPSym != BAD_VAR_NUM)
    {
        // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space.
        // Without this code, lvaAlignFrame might have put the padding lower than the PSPSym, which would be between
        // the PSPSym and the outgoing argument space.
        varDsc = lvaGetDesc(lvaPSPSym);
        assert(varDsc->lvFramePointerBased); // We always access it RBP-relative.
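        // The PSPSym is established by the prolog, so it never needs a "must init" zeroing.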
assert(!varDsc->lvMustInit); // It is never "must init". varDsc->SetStackOffset(codeGen->genCallerSPtoInitialSPdelta() + lvaLclSize(lvaOutgoingArgSpaceVar)); if (opts.IsOSR()) { // With OSR RBP points at the base of the OSR frame, but the virtual offsets // are from the base of the Tier0 frame. Adjust. // varDsc->SetStackOffset(varDsc->GetStackOffset() - info.compPatchpointInfo->TotalFrameSize()); } } #endif // The delta to be added to virtual offset to adjust it relative to frame pointer or SP int delta = 0; #ifdef TARGET_XARCH delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64 JITDUMP("--- delta bump %d for RA\n", REGSIZE_BYTES); if (codeGen->doubleAlignOrFramePointerUsed()) { JITDUMP("--- delta bump %d for FP\n", REGSIZE_BYTES); delta += REGSIZE_BYTES; // pushed EBP (frame pointer) } #endif if (!codeGen->isFramePointerUsed()) { // pushed registers, return address, and padding JITDUMP("--- delta bump %d for RSP frame\n", codeGen->genTotalFrameSize()); delta += codeGen->genTotalFrameSize(); } #if defined(TARGET_ARM) else { // We set FP to be after LR, FP delta += 2 * REGSIZE_BYTES; } #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) else { // FP is used. JITDUMP("--- delta bump %d for FP frame\n", codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta()); delta += codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta(); } #endif // TARGET_AMD64 if (opts.IsOSR()) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Stack offset includes Tier0 frame. // JITDUMP("--- delta bump %d for OSR + Tier0 frame\n", info.compPatchpointInfo->TotalFrameSize()); delta += info.compPatchpointInfo->TotalFrameSize(); #endif } JITDUMP("--- virtual stack offset to actual stack offset delta is %d\n", delta); unsigned lclNum; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { bool doAssignStkOffs = true; // Can't be relative to EBP unless we have an EBP noway_assert(!varDsc->lvFramePointerBased || codeGen->doubleAlignOrFramePointerUsed()); // Is this a non-param promoted struct field? // if so then set doAssignStkOffs to false. // if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); #if defined(TARGET_X86) // On x86, we set the stack offset for a promoted field // to match a struct parameter in lvAssignFrameOffsetsToPromotedStructs. 
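            // Hence on x86 a field is also skipped here when its parent struct is a parameter.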
            if ((!varDsc->lvIsParam || parentvarDsc->lvIsParam) && promotionType == PROMOTION_TYPE_DEPENDENT)
#else
            if (!varDsc->lvIsParam && promotionType == PROMOTION_TYPE_DEPENDENT)
#endif
            {
                doAssignStkOffs = false; // Assigned later in lvaAssignFrameOffsetsToPromotedStructs()
            }
        }

        if (!varDsc->lvOnFrame)
        {
            if (!varDsc->lvIsParam
#if !defined(TARGET_AMD64)
                || (varDsc->lvIsRegArg
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
                    && compIsProfilerHookNeeded() &&
                    !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need to assign stack
                                                                                        // offsets for prespilled
                                                                                        // arguments
#endif
                    )
#endif // !defined(TARGET_AMD64)
                )
            {
                doAssignStkOffs = false; // Not on frame or an incoming stack arg
            }
        }

        if (doAssignStkOffs)
        {
            JITDUMP("-- V%02u was %d, now %d\n", lclNum, varDsc->GetStackOffset(), varDsc->GetStackOffset() + delta);
            varDsc->SetStackOffset(varDsc->GetStackOffset() + delta);

#if DOUBLE_ALIGN
            if (genDoubleAlign() && !codeGen->isFramePointerUsed())
            {
                if (varDsc->lvFramePointerBased)
                {
                    varDsc->SetStackOffset(varDsc->GetStackOffset() - delta);

                    // We need to re-adjust the offsets of the parameters so they are EBP
                    // relative rather than stack/frame pointer relative

                    varDsc->SetStackOffset(varDsc->GetStackOffset() + (2 * TARGET_POINTER_SIZE)); // return address and
                                                                                                  // pushed EBP

                    noway_assert(varDsc->GetStackOffset() >= FIRST_ARG_STACK_OFFS);
                }
            }
#endif
            // On System V environments the stkOffs could be 0 for params passed in registers.
            //
            // For normal methods only EBP relative references can have negative offsets.
            assert(codeGen->isFramePointerUsed() || varDsc->GetStackOffset() >= 0);
        }
    }

    assert(codeGen->regSet.tmpAllFree());
    for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp))
    {
        temp->tdAdjustTempOffs(delta);
    }

    lvaCachedGenericContextArgOffs += delta;

#if FEATURE_FIXED_OUT_ARGS

    if (lvaOutgoingArgSpaceVar != BAD_VAR_NUM)
    {
        varDsc = lvaGetDesc(lvaOutgoingArgSpaceVar);
        varDsc->SetStackOffset(0);
        varDsc->lvFramePointerBased = false;
        varDsc->lvMustInit          = false;
    }

#endif // FEATURE_FIXED_OUT_ARGS

#ifdef TARGET_ARM64
    // We normally add alignment below the locals between them and the outgoing
    // arg space area. When we store fp/lr at the bottom, however, this will be
    // below the alignment. So we should not apply the alignment adjustment to
    // them. On ARM64 it turns out we always store these at +0 and +8 of the FP,
    // so instead of dealing with skipping adjustment just for them we just set
    // them here always.
    assert(codeGen->isFramePointerUsed());
    if (lvaRetAddrVar != BAD_VAR_NUM)
    {
        lvaTable[lvaRetAddrVar].SetStackOffset(REGSIZE_BYTES);
    }
#endif
}

#ifdef TARGET_ARM
bool Compiler::lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask)
{
    const LclVarDsc& desc = lvaTable[lclNum];
    return desc.lvIsRegArg && (preSpillMask & genRegMask(desc.GetArgReg()));
}
#endif // TARGET_ARM

//------------------------------------------------------------------------
// lvaUpdateArgWithInitialReg: Set the initial register of a local variable
// to the one assigned by the register allocator.
//
// Arguments:
//    varDsc - the local variable descriptor
//
void Compiler::lvaUpdateArgWithInitialReg(LclVarDsc* varDsc)
{
    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegCandidate())
    {
        varDsc->SetRegNum(varDsc->GetArgInitReg());
    }
}

//------------------------------------------------------------------------
// lvaUpdateArgsWithInitialReg() : For each argument variable descriptor, update
// its current register with the initial register as assigned by LSRA.
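// Promoted struct arguments are handled by updating each promoted field's
// descriptor rather than the parent's.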
// void Compiler::lvaUpdateArgsWithInitialReg() { if (!compLSRADone) { return; } for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromotedStruct()) { for (unsigned fieldVarNum = varDsc->lvFieldLclStart; fieldVarNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldVarNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldVarNum); lvaUpdateArgWithInitialReg(fieldVarDsc); } } else { lvaUpdateArgWithInitialReg(varDsc); } } } /***************************************************************************** * lvaAssignVirtualFrameOffsetsToArgs() : Assign virtual stack offsets to the * arguments, and implicit arguments (this ptr, return buffer, generics, * and varargs). */ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() { unsigned lclNum = 0; int argOffs = 0; #ifdef UNIX_AMD64_ABI int callerArgOffset = 0; #endif // UNIX_AMD64_ABI /* Assign stack offsets to arguments (in reverse order of passing). This means that if we pass arguments left->right, we start at the end of the list and work backwards, for right->left we start with the first argument and move forward. This is all relative to our Virtual '0' */ if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs = compArgSize; } /* Update the argOffs to reflect arguments that are passed in registers */ noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG); noway_assert(compMacOsArm64Abi() || compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES); if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES; } // Update the arg initial register locations. lvaUpdateArgsWithInitialReg(); /* Is there a "this" argument? */ if (!info.compIsStatic) { noway_assert(lclNum == info.compThisArg); #ifndef TARGET_X86 argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); #endif // TARGET_X86 lclNum++; } unsigned userArgsToSkip = 0; #if !defined(TARGET_ARM) // In the native instance method calling convention on Windows, // the this parameter comes before the hidden return buffer parameter. // So, we want to process the native "this" parameter before we process // the native return buffer parameter. 
if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { #ifdef TARGET_X86 if (!lvaTable[lclNum].lvIsRegArg) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); } #elif !defined(UNIX_AMD64_ABI) argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); #endif // TARGET_X86 lclNum++; userArgsToSkip++; } #endif /* if we have a hidden buffer parameter, that comes here */ if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(lclNum == info.compRetBuffArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); lclNum++; } #if USER_ARGS_COME_LAST //@GENERICS: extra argument for instantiation info if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { noway_assert(lclNum == (unsigned)info.compTypeCtxtArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } if (info.compIsVarArgs) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } #endif // USER_ARGS_COME_LAST CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; unsigned argSigLen = info.compMethodInfo->args.numArgs; // Skip any user args that we've already processed. assert(userArgsToSkip <= argSigLen); argSigLen -= userArgsToSkip; for (unsigned i = 0; i < userArgsToSkip; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } #ifdef TARGET_ARM // // struct_n { int; int; ... n times }; // // Consider signature: // // Foo (float a,double b,float c,double d,float e,double f,float g,double h, // float i,double j,float k,double l,struct_3 m) { } // // Basically the signature is: (all float regs full, 1 double, struct_3); // // The double argument occurs before pre spill in the argument iteration and // computes an argOffset of 0. struct_3 offset becomes 8. This is wrong. // Because struct_3 is prespilled and double occurs after prespill. // The correct offsets are double = 16 (aligned stk), struct_3 = 0..12, // Offset 12 will be skipped for double alignment of double. // // Another example is (struct_2, all float regs full, double, struct_2); // Here, notice the order is similarly messed up because of 2 pre-spilled // struct_2. // // Succinctly, // ARG_INDEX(i) > ARG_INDEX(j) DOES NOT IMPLY |ARG_OFFSET(i)| > |ARG_OFFSET(j)| // // Therefore, we'll do a two pass offset calculation, one that considers pre-spill // and the next, stack args. // unsigned argLcls = 0; // Take care of pre spill registers first. regMaskTP preSpillMask = codeGen->regSet.rsMaskPreSpillRegs(false); regMaskTP tempMask = RBM_NONE; for (unsigned i = 0, preSpillLclNum = lclNum; i < argSigLen; ++i, ++preSpillLclNum) { if (lvaIsPreSpilled(preSpillLclNum, preSpillMask)) { unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); argOffs = lvaAssignVirtualFrameOffsetToArg(preSpillLclNum, argSize, argOffs); argLcls++; // Early out if we can. If size is 8 and base reg is 2, then the mask is 0x1100 tempMask |= ((((1 << (roundUp(argSize, TARGET_POINTER_SIZE) / REGSIZE_BYTES))) - 1) << lvaTable[preSpillLclNum].GetArgReg()); if (tempMask == preSpillMask) { // We won't encounter more pre-spilled registers, // so don't bother iterating further. break; } } argLst = info.compCompHnd->getArgNext(argLst); } // Take care of non pre-spilled stack arguments. 
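    // Restart the signature walk from the first user arg; the loop below assigns
    // offsets only to the args that were not pre-spilled above.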
    argLst = info.compMethodInfo->args.args;
    for (unsigned i = 0, stkLclNum = lclNum; i < argSigLen; ++i, ++stkLclNum)
    {
        if (!lvaIsPreSpilled(stkLclNum, preSpillMask))
        {
            const unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
            argOffs                = lvaAssignVirtualFrameOffsetToArg(stkLclNum, argSize, argOffs);
            argLcls++;
        }
        argLst = info.compCompHnd->getArgNext(argLst);
    }

    lclNum += argLcls;
#else // !TARGET_ARM
    for (unsigned i = 0; i < argSigLen; i++)
    {
        unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args);

        assert(compMacOsArm64Abi() || argumentSize % TARGET_POINTER_SIZE == 0);

        argOffs =
            lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
        argLst = info.compCompHnd->getArgNext(argLst);
    }
#endif // !TARGET_ARM

#if !USER_ARGS_COME_LAST

    //@GENERICS: extra argument for instantiation info
    if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
    {
        noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                   argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
    }

    if (info.compIsVarArgs)
    {
        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                   argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
    }

#endif // USER_ARGS_COME_LAST
}

#ifdef UNIX_AMD64_ABI
//
//  lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an
//  individual argument, and return the offset for the next argument.
//  Note: This method only calculates the initial offset of the stack passed/spilled arguments
//  (if any - the RA might decide to spill (home on the stack) register-passed arguments if they are rarely used.)
//  The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
//  ret address slot, stack frame padding, alloca instructions, etc.
//  Note: This is the implementation for UNIX_AMD64 System V platforms.
//
int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
                                               unsigned argSize,
                                               int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset))
{
    noway_assert(lclNum < info.compArgsCount);
    noway_assert(argSize);

    if (info.compArgOrder == Target::ARG_ORDER_L2R)
    {
        argOffs -= argSize;
    }

    unsigned fieldVarNum = BAD_VAR_NUM;

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegArg)
    {
        // Argument is passed in a register, don't count it
        // when updating the current offset on the stack.

        if (varDsc->lvOnFrame)
        {
            // The offset for args needs to be set only for the stack homed arguments for System V.
            varDsc->SetStackOffset(argOffs);
        }
        else
        {
            varDsc->SetStackOffset(0);
        }
    }
    else
    {
        // For Windows AMD64 there are 4 slots for the register passed arguments on the top of the caller's stack.
        // This is where they are always homed. So, they can be accessed with positive offset.
        // On System V platforms, if the RA decides to home a register passed arg on the stack, it creates a stack
        // location on the callee stack (like any other local var.) In such a case, the register passed, stack homed
        // arguments are accessed using negative offsets and the stack passed arguments are accessed using positive
        // offset (from the caller's stack.)
        // For System V platforms if there is no frame pointer the caller stack parameter offset should include the
        // callee allocated space. If frame register is used, the callee allocated space should not be included for
        // accessing the caller stack parameters.
The last two requirements are met in the lvaFixVirtualFrameOffsets
        // method, which fixes the offsets based on frame pointer existence, existence of alloca instructions, ret
        // address pushed, etc.
        varDsc->SetStackOffset(*callerArgOffset);
        // Structs passed on stack could be of size less than TARGET_POINTER_SIZE.
        // Make sure they get at least TARGET_POINTER_SIZE on the stack - this is required for alignment.
        if (argSize > TARGET_POINTER_SIZE)
        {
            *callerArgOffset += (int)roundUp(argSize, TARGET_POINTER_SIZE);
        }
        else
        {
            *callerArgOffset += TARGET_POINTER_SIZE;
        }
    }

    // For struct promoted parameters we need to set the offsets for the field lclVars.
    //
    // For a promoted struct we also assign the struct fields stack offset
    if (varDsc->lvPromotedStruct())
    {
        unsigned firstFieldNum = varDsc->lvFieldLclStart;
        int      offset        = varDsc->GetStackOffset();
        for (unsigned i = 0; i < varDsc->lvFieldCnt; i++)
        {
            LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i);
            fieldVarDsc->SetStackOffset(offset + fieldVarDsc->lvFldOffset);
        }
    }

    if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg)
    {
        argOffs += argSize;
    }

    return argOffs;
}

#else // !UNIX_AMD64_ABI

//
//  lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an
//  individual argument, and return the offset for the next argument.
//  Note: This method only calculates the initial offset of the stack passed/spilled arguments
//  (if any - the RA might decide to spill (home on the stack) register-passed arguments if they are rarely used.)
//  The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
//  ret address slot, stack frame padding, alloca instructions, etc.
//  Note: This is the implementation for all platforms except UNIX_AMD64 OSs (System V 64-bit.)
int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
                                               unsigned argSize,
                                               int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset))
{
    noway_assert(lclNum < info.compArgsCount);
    noway_assert(argSize);

    if (info.compArgOrder == Target::ARG_ORDER_L2R)
    {
        argOffs -= argSize;
    }

    unsigned fieldVarNum = BAD_VAR_NUM;

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegArg)
    {
        /* Argument is passed in a register, don't count it
         * when updating the current offset on the stack */
        CLANG_FORMAT_COMMENT_ANCHOR;

#if !defined(TARGET_ARMARCH)
#if DEBUG
        // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize
        // Also investigate why we are incrementing argOffs for X86 as this seems incorrect
        //
        noway_assert(argSize == TARGET_POINTER_SIZE);
#endif // DEBUG
#endif

#if defined(TARGET_X86)
        argOffs += TARGET_POINTER_SIZE;
#elif defined(TARGET_AMD64)
        // Register arguments on AMD64 also take stack space (in the backing store).
        varDsc->SetStackOffset(argOffs);
        argOffs += TARGET_POINTER_SIZE;
#elif defined(TARGET_ARM64)
        // Register arguments on ARM64 only take stack space when they have a frame home.
        // Unless on Windows and in a vararg method.
        if (compFeatureArgSplit() && this->info.compIsVarArgs)
        {
            if (varDsc->lvType == TYP_STRUCT && varDsc->GetOtherArgReg() >= MAX_REG_ARG &&
                varDsc->GetOtherArgReg() != REG_NA)
            {
                // This is a split struct. It will account for an extra (8 bytes)
                // of alignment.
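                // Bump both the argument's stack home and the running offset by one
                // pointer-sized slot to account for it.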
varDsc->SetStackOffset(varDsc->GetStackOffset() + TARGET_POINTER_SIZE); argOffs += TARGET_POINTER_SIZE; } } #elif defined(TARGET_ARM) // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, so we have to do SetStackOffset() here // regMaskTP regMask = genRegMask(varDsc->GetArgReg()); if (codeGen->regSet.rsMaskPreSpillRegArg & regMask) { // Signature: void foo(struct_8, int, struct_4) // ------- CALLER SP ------- // r3 struct_4 // r2 int - not prespilled, but added for alignment. argOffs should skip this. // r1 struct_8 // r0 struct_8 // ------------------------- // If we added alignment we need to fix argOffs for all registers above alignment. if (codeGen->regSet.rsMaskPreSpillAlign != RBM_NONE) { assert(genCountBits(codeGen->regSet.rsMaskPreSpillAlign) == 1); // Is register beyond the alignment pos? if (regMask > codeGen->regSet.rsMaskPreSpillAlign) { // Increment argOffs just once for the _first_ register after alignment pos // in the prespill mask. if (!BitsBetween(codeGen->regSet.rsMaskPreSpillRegArg, regMask, codeGen->regSet.rsMaskPreSpillAlign)) { argOffs += TARGET_POINTER_SIZE; } } } switch (varDsc->lvType) { case TYP_STRUCT: if (!varDsc->lvStructDoubleAlign) { break; } FALLTHROUGH; case TYP_DOUBLE: case TYP_LONG: { // // Let's assign offsets to arg1, a double in r2. argOffs has to be 4 not 8. // // ------- CALLER SP ------- // r3 // r2 double -- argOffs = 4, but it doesn't need to be skipped, because there is no skipping. // r1 VACookie -- argOffs = 0 // ------------------------- // // Consider argOffs as if it accounts for number of prespilled registers before the current // register. In the above example, for r2, it is r1 that is prespilled, but since r1 is // accounted for by argOffs being 4, there should have been no skipping. Instead, if we didn't // assign r1 to any variable, then argOffs would still be 0 which implies it is not accounting // for r1, equivalently r1 is skipped. // // If prevRegsSize is unaccounted for by a corresponding argOffs, we must have skipped a register. int prevRegsSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegArg & (regMask - 1)) * TARGET_POINTER_SIZE; if (argOffs < prevRegsSize) { // We must align up the argOffset to a multiple of 8 to account for skipped registers. argOffs = roundUp((unsigned)argOffs, 2 * TARGET_POINTER_SIZE); } // We should've skipped only a single register. assert(argOffs == prevRegsSize); } break; default: // No alignment of argOffs required break; } varDsc->SetStackOffset(argOffs); argOffs += argSize; } #else // TARGET* #error Unsupported or unset target architecture #endif // TARGET* } else { #if defined(TARGET_ARM) // Dev11 Bug 42817: incorrect codegen for DrawFlatCheckBox causes A/V in WinForms // // Here we have method with a signature (int a1, struct a2, struct a3, int a4, int a5). // Struct parameter 'a2' is 16-bytes with no alignment requirements; // it uses r1,r2,r3 and [OutArg+0] when passed. // Struct parameter 'a3' is 16-bytes that is required to be double aligned; // the caller skips [OutArg+4] and starts the argument at [OutArg+8]. // Thus the caller generates the correct code to pass the arguments. // When generating code to receive the arguments we set codeGen->regSet.rsMaskPreSpillRegArg to [r1,r2,r3] // and spill these three registers as the first instruction in the prolog. // Then when we layout the arguments' stack offsets we have an argOffs 0 which // points at the location that we spilled r1 into the stack. 
For this first // struct we take the lvIsRegArg path above with "codeGen->regSet.rsMaskPreSpillRegArg &" matching. // Next when we calculate the argOffs for the second 16-byte struct we have an argOffs // of 16, which appears to be aligned properly so we don't skip a stack slot. // // To fix this we must recover the actual OutArg offset by subtracting off the // sizeof of the PreSpill register args. // Then we align this offset to a multiple of 8 and add back the sizeof // of the PreSpill register args. // // Dev11 Bug 71767: failure of assert(sizeofPreSpillRegArgs <= argOffs) // // We have a method with 'this' passed in r0, RetBuf arg in r1, VarArgs cookie // in r2. The first user arg is a 144 byte struct with double alignment required, // r3 is skipped, and the struct is passed on the stack. However, 'r3' is added // to the codeGen->regSet.rsMaskPreSpillRegArg mask by the VarArgs cookie code, since we need to // home all the potential varargs arguments in registers, even if we don't have // signature type information for the variadic arguments. However, due to alignment, // we have skipped a register that doesn't have a corresponding symbol. Make up // for that by increasing argOffs here. // int sizeofPreSpillRegArgs = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; if (argOffs < sizeofPreSpillRegArgs) { // This can only happen if we skipped the last register spot because current stk arg // is a struct requiring alignment or a pre-spill alignment was required because the // first reg arg needed alignment. // // Example 1: First Stk Argument requiring alignment in vararg case (same as above comment.) // Signature (int a0, int a1, int a2, struct {long} a3, ...) // // stk arg a3 --> argOffs here will be 12 (r0-r2) but pre-spill will be 16. // ---- Caller SP ---- // r3 --> Stack slot is skipped in this case. // r2 int a2 // r1 int a1 // r0 int a0 // // Example 2: First Reg Argument requiring alignment in no-vararg case. // Signature (struct {long} a0, struct {int} a1, int a2, int a3) // // stk arg --> argOffs here will be 12 {r0-r2} but pre-spill will be 16. // ---- Caller SP ---- // r3 int a2 --> pushed (not pre-spilled) for alignment of a0 by lvaInitUserArgs. // r2 struct { int } a1 // r0-r1 struct { long } a0 CLANG_FORMAT_COMMENT_ANCHOR; #ifdef PROFILING_SUPPORTED // On Arm under profiler, r0-r3 are always prespilled on stack. // It is possible to have methods that accept only HFAs as parameters e.g. Signature(struct hfa1, struct // hfa2), in which case hfa1 and hfa2 will be en-registered in co-processor registers and will have an // argument offset less than size of preSpill. // // For this reason the following conditions are asserted when not under profiler. if (!compIsProfilerHookNeeded()) #endif { bool cond = ((info.compIsVarArgs || opts.compUseSoftFP) && // Does cur stk arg require double alignment? ((varDsc->lvType == TYP_STRUCT && varDsc->lvStructDoubleAlign) || (varDsc->lvType == TYP_DOUBLE) || (varDsc->lvType == TYP_LONG))) || // Did first reg arg require alignment? 
(codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST)); noway_assert(cond); noway_assert(sizeofPreSpillRegArgs <= argOffs + TARGET_POINTER_SIZE); // at most one register of alignment } argOffs = sizeofPreSpillRegArgs; } noway_assert(argOffs >= sizeofPreSpillRegArgs); int argOffsWithoutPreSpillRegArgs = argOffs - sizeofPreSpillRegArgs; switch (varDsc->lvType) { case TYP_STRUCT: if (!varDsc->lvStructDoubleAlign) break; FALLTHROUGH; case TYP_DOUBLE: case TYP_LONG: // We must align up the argOffset to a multiple of 8 argOffs = roundUp((unsigned)argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs; break; default: // No alignment of argOffs required break; } #endif // TARGET_ARM const bool isFloatHfa = (varDsc->lvIsHfa() && (varDsc->GetHfaType() == TYP_FLOAT)); const unsigned argAlignment = eeGetArgSizeAlignment(varDsc->lvType, isFloatHfa); if (compMacOsArm64Abi()) { argOffs = roundUp(argOffs, argAlignment); } assert((argSize % argAlignment) == 0); assert((argOffs % argAlignment) == 0); varDsc->SetStackOffset(argOffs); } // For struct promoted parameters we need to set the offsets for both LclVars. // // For a dependent promoted struct we also assign the struct fields stack offset CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_64BIT) if ((varDsc->TypeGet() == TYP_LONG) && varDsc->lvPromoted) { noway_assert(varDsc->lvFieldCnt == 2); fieldVarNum = varDsc->lvFieldLclStart; lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset()); lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + genTypeSize(TYP_INT)); } else #endif // !defined(TARGET_64BIT) if (varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); JITDUMP("Adjusting offset of dependent V%02u of arg V%02u: parent %u field %u net %u\n", lclNum, firstFieldNum + i, varDsc->GetStackOffset(), fieldVarDsc->lvFldOffset, varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); } } if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg) { argOffs += argSize; } return argOffs; } #endif // !UNIX_AMD64_ABI //----------------------------------------------------------------------------- // lvaAssignVirtualFrameOffsetsToLocals: compute the virtual stack offsets for // all elements on the stackframe. // // Notes: // Can be called multiple times. Early calls can be used to estimate various // frame offsets, but details may change. // void Compiler::lvaAssignVirtualFrameOffsetsToLocals() { // (1) Account for things that are set up by the prolog and undone by the epilog. // int stkOffs = 0; int originalFrameStkOffs = 0; int originalFrameSize = 0; // codeGen->isFramePointerUsed is set in regalloc phase. Initialize it to a guess for pre-regalloc layout. if (lvaDoneFrameLayout <= PRE_REGALLOC_FRAME_LAYOUT) { codeGen->setFramePointerUsed(codeGen->isFramePointerRequired()); } #ifdef TARGET_ARM64 // Decide where to save FP and LR registers. We store FP/LR registers at the bottom of the frame if there is // a frame pointer used (so we get positive offsets from the frame pointer to access locals), but not if we // need a GS cookie AND localloc is used, since we need the GS cookie to protect the saved return value, // and also the saved frame pointer. See CodeGen::genPushCalleeSavedRegisters() for more details about the // frame types.
Since saving FP/LR at high addresses is a relatively rare case, force using it during stress. // (It should be legal to use these frame types for every frame). if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 0) { // Default configuration codeGen->SetSaveFpLrWithAllCalleeSavedRegisters((getNeedsGSSecurityCookie() && compLocallocUsed) || compStressCompile(STRESS_GENERIC_VARN, 20)); } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 1) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(false); // Disable using new frames } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 2) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(true); // Force using new frames } #endif // TARGET_ARM64 #ifdef TARGET_XARCH // On x86/amd64, the return address has already been pushed by the call instruction in the caller. stkOffs -= TARGET_POINTER_SIZE; // return address; if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs); } #endif // If we are an OSR method, we "inherit" the frame of the original method // if (opts.IsOSR()) { originalFrameSize = info.compPatchpointInfo->TotalFrameSize(); originalFrameStkOffs = stkOffs; stkOffs -= originalFrameSize; } #ifdef TARGET_XARCH // TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other // calleeregs. When you fix this, you'll also need to fix // the assert at the bottom of this method if (codeGen->doubleAlignOrFramePointerUsed()) { stkOffs -= REGSIZE_BYTES; } #endif int preSpillSize = 0; bool mustDoubleAlign = false; #ifdef TARGET_ARM mustDoubleAlign = true; preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; #else // !TARGET_ARM #if DOUBLE_ALIGN if (genDoubleAlign()) { mustDoubleAlign = true; // X86 only } #endif #endif // !TARGET_ARM #ifdef TARGET_ARM64 // If the frame pointer is used, then we'll save FP/LR at the bottom of the stack. // Otherwise, we won't store FP, and we'll store LR at the top, with the other callee-save // registers (if any). int initialStkOffs = 0; if (info.compIsVarArgs) { // For varargs we always save all of the integer register arguments // so that they are contiguous with the incoming stack arguments. initialStkOffs = MAX_REG_ARG * REGSIZE_BYTES; stkOffs -= initialStkOffs; } if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || !isFramePointerUsed()) // Note that currently we always have a frame pointer { stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; } else { // Subtract off FP and LR. assert(compCalleeRegsPushed >= 2); stkOffs -= (compCalleeRegsPushed - 2) * REGSIZE_BYTES; } #else // !TARGET_ARM64 #ifdef TARGET_ARM // On ARM32 LR is part of the pushed registers and is always stored at the // top. if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs - REGSIZE_BYTES); } #endif stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; #endif // !TARGET_ARM64 // (2) Account for the remainder of the frame // // From this point on the code must generally adjust both // stkOffs and the local frame size. The latter is done via: // // lvaIncrementFrameSize -- for space not associated with a local var // lvaAllocLocalAndSetVirtualOffset -- for space associated with a local var // // One exception to the above: OSR locals that have offsets within the Tier0 // portion of the frame. 
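// Illustrative bookkeeping example (invented numbers): allocating an 8-byte local via
// lvaAllocLocalAndSetVirtualOffset moves stkOffs from, say, -0x20 to -0x28 and grows
// compLclFrameSize by 8 at the same time; the two must move in lock step for the consistency
// assert at the end of this method to hold.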
// compLclFrameSize = 0; #ifdef TARGET_AMD64 // For methods with patchpoints, the Tier0 method must reserve // space for all the callee saves, as this area is shared with the // OSR method, and we have to anticipate that collectively the // Tier0 and OSR methods end up saving all callee saves. // // Currently this is x64 only. // if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints()) { const unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); const unsigned extraSlots = genCountBits(RBM_OSR_INT_CALLEE_SAVED) - regsPushed; const unsigned extraSlotSize = extraSlots * REGSIZE_BYTES; JITDUMP("\nMethod has patchpoints and has %u callee saves.\n" "Reserving %u extra slots (%u bytes) for potential OSR method callee saves\n", regsPushed, extraSlots, extraSlotSize); stkOffs -= extraSlotSize; lvaIncrementFrameSize(extraSlotSize); } // In case of Amd64 compCalleeRegsPushed does not include float regs (Xmm6-xmm15) that // need to be pushed. But Amd64 doesn't support push/pop of xmm registers. // Instead we need to allocate space for them on the stack and save them in prolog. // Therefore, we consider xmm registers being saved while computing stack offsets // but space for xmm registers is considered part of compLclFrameSize. // Notes // 1) We need to save the entire 128-bits of xmm register to stack, since amd64 // prolog unwind codes allow encoding of an instruction that stores the entire xmm reg // at an offset relative to SP // 2) We adjust frame size so that SP is aligned at 16-bytes after pushing integer registers. // This means while saving the first xmm register to its allocated stack location we might // have to skip 8-bytes. The reason for padding is to use efficient "movaps" to save/restore // xmm registers to/from stack to match Jit64 codegen. Without the aligning on 16-byte // boundary we would have to use movups when offset turns out unaligned. Movaps is more // performant than movups. const unsigned calleeFPRegsSavedSize = genCountBits(compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES; // For OSR the alignment pad computation should not take the original frame into account. // Original frame size includes the pseudo-saved RA and so is always = 8 mod 16. const int offsetForAlign = -(stkOffs + originalFrameSize); if ((calleeFPRegsSavedSize > 0) && ((offsetForAlign % XMM_REGSIZE_BYTES) != 0)) { // Take care of alignment int alignPad = (int)AlignmentPad((unsigned)offsetForAlign, XMM_REGSIZE_BYTES); assert(alignPad != 0); stkOffs -= alignPad; lvaIncrementFrameSize(alignPad); } stkOffs -= calleeFPRegsSavedSize; lvaIncrementFrameSize(calleeFPRegsSavedSize); // Quirk for VS debug-launch scenario to work if (compVSQuirkStackPaddingNeeded > 0) { #ifdef DEBUG if (verbose) { printf("\nAdding VS quirk stack padding of %d bytes between save-reg area and locals\n", compVSQuirkStackPaddingNeeded); } #endif // DEBUG stkOffs -= compVSQuirkStackPaddingNeeded; lvaIncrementFrameSize(compVSQuirkStackPaddingNeeded); } #endif // TARGET_AMD64 #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARMARCH) if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym, allocate it first, before anything else, including // padding (so we can avoid computing the same padding in the funclet // frame). Note that there is no special padding requirement for the PSPSym. 
noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(TARGET_ARMARCH) if (mustDoubleAlign) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // Allocate a pointer sized stack slot, since we may need to double align here // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs // then we need to allocate a second pointer sized stack slot, // since we may need to double align that LclVar when we see it // in the loop below. We will just always do this so that the // offsets that we calculate for the stack frame will always // be greater (or equal) to what they can be in the final layout. // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } else // FINAL_FRAME_LAYOUT { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } } if (lvaMonAcquired != BAD_VAR_NUM) { // For OSR we use the flag set up by the original method. // if (opts.IsOSR()) { assert(info.compPatchpointInfo->HasMonitorAcquired()); int originalOffset = info.compPatchpointInfo->MonitorAcquiredOffset(); int offset = originalFrameStkOffs + originalOffset; JITDUMP( "---OSR--- V%02u (on tier0 frame, monitor acquired) tier0 FP-rel offset %d tier0 frame offset %d new " "virt offset %d\n", lvaMonAcquired, originalOffset, originalFrameStkOffs, offset); lvaTable[lvaMonAcquired].SetStackOffset(offset); } else { // This var must go first, in what is called the 'frame header' for EnC so that it is // preserved when remapping occurs. See vm\eetwain.cpp for detailed comment specifying frame // layout requirements for EnC to work. stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaMonAcquired, lvaLclSize(lvaMonAcquired), stkOffs); } } #ifdef JIT32_GCENCODER if (lvaLocAllocSPvar != BAD_VAR_NUM) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaLocAllocSPvar, TARGET_POINTER_SIZE, stkOffs); } #endif // JIT32_GCENCODER // For OSR methods, param type args are always reportable via the root method frame slot. // (see gcInfoBlockHdrSave) and so do not need a new slot on the frame. // // OSR methods may also be able to use the root frame's kept-alive this, if the root // method needed to report this. // // Inlining done under OSR may introduce new reporting, in which case the OSR frame // must allocate a slot.
if (lvaReportParamTypeArg()) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; assert(ppInfo->HasGenericContextArgOffset()); const int originalOffset = ppInfo->GenericContextArgOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; } else { // For CORINFO_CALLCONV_PARAMTYPE (if needed) lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #ifndef JIT32_GCENCODER else if (lvaKeepAliveAndReportThis()) { bool canUseExistingSlot = false; if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; if (ppInfo->HasKeptAliveThis()) { const int originalOffset = ppInfo->KeptAliveThisOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; canUseExistingSlot = true; } } if (!canUseExistingSlot) { // When "this" is also used as generic context arg. lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #endif #if !defined(FEATURE_EH_FUNCLETS) /* If we need space for slots for shadow SP, reserve it now */ if (ehNeedsShadowSPslots()) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect if (!lvaReportParamTypeArg()) { #ifndef JIT32_GCENCODER if (!lvaKeepAliveAndReportThis()) #endif { // In order to keep the gc info encoding smaller, the VM assumes that all methods with EH // have also saved space for a ParamTypeArg, so we need to do that here lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } } stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaShadowSPslotsVar, lvaLclSize(lvaShadowSPslotsVar), stkOffs); } #endif // !FEATURE_EH_FUNCLETS if (compGSReorderStackLayout) { assert(getNeedsGSSecurityCookie()); if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie()) { stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs); } } /* If we're supposed to track lifetimes of pointer temps, we'll assign frame offsets in the following order: non-pointer local variables (also untracked pointer variables) pointer local variables pointer temps non-pointer temps */ enum Allocation { ALLOC_NON_PTRS = 0x1, // assign offsets to non-ptr ALLOC_PTRS = 0x2, // Second pass, assign offsets to tracked ptrs ALLOC_UNSAFE_BUFFERS = 0x4, ALLOC_UNSAFE_BUFFERS_WITH_PTRS = 0x8 }; UINT alloc_order[5]; unsigned int cur = 0; if (compGSReorderStackLayout) { noway_assert(getNeedsGSSecurityCookie()); if (codeGen->isFramePointerUsed()) { alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS; alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS; } } bool tempsAllocated = false; if (lvaTempsHaveLargerOffsetThanVars() && !codeGen->isFramePointerUsed()) { // Because we want the temps to have a larger offset than locals // and we're not using a frame pointer, we have to place the temps // above the vars. Otherwise we place them after the vars (at the // bottom of the frame). 
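// Illustrative sketch for this case (no frame pointer; a "larger offset" means farther from SP,
// i.e. higher in memory):
//   | spill temps     |  <- allocated first, larger SP offsets
//   | local variables |  <- allocated next, closer to SP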
noway_assert(!tempsAllocated); stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign); tempsAllocated = true; } alloc_order[cur++] = ALLOC_NON_PTRS; if (opts.compDbgEnC) { /* We will use just one pass, and assign offsets to all variables */ alloc_order[cur - 1] |= ALLOC_PTRS; noway_assert(compGSReorderStackLayout == false); } else { alloc_order[cur++] = ALLOC_PTRS; } if (!codeGen->isFramePointerUsed() && compGSReorderStackLayout) { alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS; alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS; } alloc_order[cur] = 0; noway_assert(cur < ArrLen(alloc_order)); // Force first pass to happen UINT assignMore = 0xFFFFFFFF; bool have_LclVarDoubleAlign = false; for (cur = 0; alloc_order[cur]; cur++) { if ((assignMore & alloc_order[cur]) == 0) { continue; } assignMore = 0; unsigned lclNum; LclVarDsc* varDsc; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { /* Ignore field locals of the promotion type PROMOTION_TYPE_FIELD_DEPENDENT. In other words, we will not calculate the "base" address of the struct local if the promotion type is PROMOTION_TYPE_FIELD_DEPENDENT. */ if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { continue; } #if FEATURE_FIXED_OUT_ARGS // The scratch mem is used for the outgoing arguments, and it must be absolutely last if (lclNum == lvaOutgoingArgSpaceVar) { continue; } #endif bool allocateOnFrame = varDsc->lvOnFrame; if (varDsc->lvRegister && (lvaDoneFrameLayout == REGALLOC_FRAME_LAYOUT) && ((varDsc->TypeGet() != TYP_LONG) || (varDsc->GetOtherReg() != REG_STK))) { allocateOnFrame = false; } // For OSR args and locals, we use the slots on the original frame. // // Note we must do this even for "non frame" locals, as we sometimes // will refer to their memory homes. if (lvaIsOSRLocal(lclNum)) { if (varDsc->lvIsStructField) { const unsigned parentLclNum = varDsc->lvParentLcl; const int parentOriginalOffset = info.compPatchpointInfo->Offset(parentLclNum); const int offset = originalFrameStkOffs + parentOriginalOffset + varDsc->lvFldOffset; JITDUMP("---OSR--- V%02u (promoted field of V%02u; on tier0 frame) tier0 FP-rel offset %d tier0 " "frame offset %d field offset %d new virt offset " "%d\n", lclNum, parentLclNum, parentOriginalOffset, originalFrameStkOffs, varDsc->lvFldOffset, offset); lvaTable[lclNum].SetStackOffset(offset); } else { // Add frame-pointer-relative offset of this OSR live local in the original frame // to the offset of original frame in our new frame. const int originalOffset = info.compPatchpointInfo->Offset(lclNum); const int offset = originalFrameStkOffs + originalOffset; JITDUMP( "---OSR--- V%02u (on tier0 frame) tier0 FP-rel offset %d tier0 frame offset %d new virt offset " "%d\n", lclNum, originalOffset, originalFrameStkOffs, offset); lvaTable[lclNum].SetStackOffset(offset); } continue; } /* Ignore variables that are not on the stack frame */ if (!allocateOnFrame) { /* For EnC, all variables have to be allocated space on the stack, even though they may actually be enregistered. This way, the frame layout can be directly inferred from the locals-sig. */ if (!opts.compDbgEnC) { continue; } else if (lclNum >= info.compLocalsCount) { // ignore temps for EnC continue; } } else if (lvaGSSecurityCookie == lclNum && getNeedsGSSecurityCookie()) { // Special case for OSR. If the original method had a cookie, // we use its slot on the original frame.
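// Illustrative arithmetic (invented values, signs as used in the code below): if the inherited
// tier0 frame starts at virtual offset originalFrameStkOffs == -0x10 and the tier0 cookie
// offset is -0x28, the reused virtual offset is -0x10 + -0x28 == -0x38 and no new slot is
// allocated.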
if (opts.IsOSR() && info.compPatchpointInfo->HasSecurityCookie()) { int originalOffset = info.compPatchpointInfo->SecurityCookieOffset(); int offset = originalFrameStkOffs + originalOffset; JITDUMP("---OSR--- V%02u (on tier0 frame, security cookie) tier0 FP-rel offset %d tier0 frame " "offset %d new " "virt offset %d\n", lclNum, originalOffset, originalFrameStkOffs, offset); lvaTable[lclNum].SetStackOffset(offset); } continue; } // These need to be located as the very first variables (highest memory address) // and so they have already been assigned an offset if ( #if defined(FEATURE_EH_FUNCLETS) lclNum == lvaPSPSym || #else lclNum == lvaShadowSPslotsVar || #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER lclNum == lvaLocAllocSPvar || #endif // JIT32_GCENCODER lclNum == lvaRetAddrVar) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } if (lclNum == lvaMonAcquired) { continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaStubArgumentVar) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaInlinedPInvokeFrameVar) { noway_assert(codeGen->isFramePointerUsed()); continue; } if (varDsc->lvIsParam) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On Windows AMD64 we can use the caller-reserved stack area that is already setup assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; #else // !TARGET_AMD64 // A register argument that is not enregistered ends up as // a local variable which will need stack frame space. // if (!varDsc->lvIsRegArg) { continue; } #ifdef TARGET_ARM64 if (info.compIsVarArgs && varDsc->GetArgReg() != theFixedRetBuffArgNum()) { // Stack offset to varargs (parameters) should point to home area which will be preallocated. const unsigned regArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg()); varDsc->SetStackOffset(-initialStkOffs + regArgNum * REGSIZE_BYTES); continue; } #endif #ifdef TARGET_ARM // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, thus they don't need stack frame space. // if ((codeGen->regSet.rsMaskPreSpillRegs(false) & genRegMask(varDsc->GetArgReg())) != 0) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } #endif #endif // !TARGET_AMD64 } /* Make sure the type is appropriate */ if (varDsc->lvIsUnsafeBuffer && compGSReorderStackLayout) { if (varDsc->lvIsPtr) { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS_WITH_PTRS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS_WITH_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS; continue; } } } else if (varTypeIsGC(varDsc->TypeGet()) && varDsc->lvTracked) { if ((alloc_order[cur] & ALLOC_PTRS) == 0) { assignMore |= ALLOC_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_NON_PTRS) == 0) { assignMore |= ALLOC_NON_PTRS; continue; } } /* Need to align the offset? 
*/ if (mustDoubleAlign && (varDsc->lvType == TYP_DOUBLE // Align doubles for ARM and x86 #ifdef TARGET_ARM || varDsc->lvType == TYP_LONG // Align longs for ARM #endif #ifndef TARGET_64BIT || varDsc->lvStructDoubleAlign // Align when lvStructDoubleAlign is true #endif // !TARGET_64BIT )) { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) && !have_LclVarDoubleAlign) { // If this is the first TYP_LONG, TYP_DOUBLE or double aligned struct // that we have seen in this loop then we allocate a pointer sized // stack slot since we may need to double align this LclVar // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } else { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } // Remember that we had to double align a LclVar have_LclVarDoubleAlign = true; } // Reserve the stack space for this variable stkOffs = lvaAllocLocalAndSetVirtualOffset(lclNum, lvaLclSize(lclNum), stkOffs); #ifdef TARGET_ARMARCH // If we have an incoming register argument that has a struct promoted field // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // if (varDsc->lvIsRegArg && varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); } } #ifdef TARGET_ARM // If we have an incoming register argument that has a promoted long // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // else if (varDsc->lvIsRegArg && varDsc->lvPromoted) { assert(varTypeIsLong(varDsc) && (varDsc->lvFieldCnt == 2)); unsigned fieldVarNum = varDsc->lvFieldLclStart; lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset()); lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + 4); } #endif // TARGET_ARM #endif // TARGET_ARMARCH } } if (getNeedsGSSecurityCookie() && !compGSReorderStackLayout) { if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie()) { // LOCALLOC used, but we have no unsafe buffer. Allocate cookie last, close to localloc buffer. stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs); } } if (tempsAllocated == false) { /*------------------------------------------------------------------------- * * Now the temps * *------------------------------------------------------------------------- */ stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign); } /*------------------------------------------------------------------------- * * Now do some final stuff * *------------------------------------------------------------------------- */ // lvaInlinedPInvokeFrameVar and lvaStubArgumentVar need to be assigned last // Important: The stack walker depends on lvaStubArgumentVar immediately // following lvaInlinedPInvokeFrameVar in the frame.
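// One reading of the requirement above (for illustration only): lvaStubArgumentVar is
// allocated first and so lands at the higher address, and lvaInlinedPInvokeFrameVar is then
// allocated immediately below it, producing the adjacency the stack walker expects.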
if (lvaStubArgumentVar != BAD_VAR_NUM) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaStubArgumentVar, lvaLclSize(lvaStubArgumentVar), stkOffs); } if (lvaInlinedPInvokeFrameVar != BAD_VAR_NUM) { noway_assert(codeGen->isFramePointerUsed()); stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaInlinedPInvokeFrameVar, lvaLclSize(lvaInlinedPInvokeFrameVar), stkOffs); } if (mustDoubleAlign) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // Allocate a pointer sized stack slot, since we may need to double align here // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; if (have_LclVarDoubleAlign) { // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs // then we need to allocate a second pointer sized stack slot, // since we may need to double align the last LclVar that we saw // in the loop above. We do this so that the offsets that we // calculate for the stack frame are always greater than they will // be in the final layout. // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } } else // FINAL_FRAME_LAYOUT { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument // space. Any padding will be higher on the stack than this // (including the padding added by lvaAlignFrame()). noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) #ifdef TARGET_ARM64 if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have a frame pointer { // Create space for saving FP and LR. stkOffs -= 2 * REGSIZE_BYTES; } #endif // TARGET_ARM64 #if FEATURE_FIXED_OUT_ARGS if (lvaOutgoingArgSpaceSize > 0) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V. noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE)); #endif noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0); // Give it a value so we can avoid asserts in CHK builds. // Since this will always use an SP relative offset of zero // at the end of lvaFixVirtualFrameOffsets, it will be set to absolute '0' stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaOutgoingArgSpaceVar, lvaLclSize(lvaOutgoingArgSpaceVar), stkOffs); } #endif // FEATURE_FIXED_OUT_ARGS // compLclFrameSize equals our negated virtual stack offset minus the pushed registers and return address // and the pushed frame pointer register which for some strange reason isn't part of 'compCalleeRegsPushed'.
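// Worked example (invented numbers; non-OSR x64 with a frame pointer): with
// compCalleeRegsPushed == 2, the pushed RBP and return address make pushedCount == 4, so if
// stkOffs ended up at -0x58 the assert below requires compLclFrameSize == -(-0x58 + 4 * 8),
// i.e. 0x38.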
int pushedCount = compCalleeRegsPushed; #ifdef TARGET_ARM64 if (info.compIsVarArgs) { pushedCount += MAX_REG_ARG; } #endif #ifdef TARGET_XARCH if (codeGen->doubleAlignOrFramePointerUsed()) { pushedCount += 1; // pushed EBP (frame pointer) } pushedCount += 1; // pushed PC (return address) #endif noway_assert(compLclFrameSize + originalFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE))); } int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs) { noway_assert(lclNum != BAD_VAR_NUM); #ifdef TARGET_64BIT // Before final frame layout, assume the worst case, that every >=8 byte local will need // maximum padding to be aligned. This is because we generate code based on the stack offset // computed during tentative frame layout. These offsets cannot get bigger during final // frame layout, as that would possibly require different code generation (for example, // using a 4-byte offset instead of a 1-byte offset in an instruction). The offsets can get // smaller. It is possible there is different alignment at the point locals are allocated // between tentative and final frame layout which would introduce padding between locals // and thus increase the offset (from the stack pointer) of one of the locals. Hence the // need to assume the worst alignment before final frame layout. // We could probably improve this by sorting all the objects by alignment, // such that all 8 byte objects are together, 4 byte objects are together, etc., which // would require at most one alignment padding per group. // // TYP_SIMD structs locals have alignment preference given by getSIMDTypeAlignment() for // better performance. if ((size >= 8) && ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || ((stkOffs % 8) != 0) #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES || lclVarIsSIMDType(lclNum) #endif )) { // Note that stack offsets are negative or equal to zero assert(stkOffs <= 0); // alignment padding unsigned pad = 0; #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(lclNum) && !lvaIsImplicitByRefLocal(lclNum)) { int alignment = getSIMDTypeAlignment(lvaTable[lclNum].lvType); if (stkOffs % alignment != 0) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = alignment - 1; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = alignment + (stkOffs % alignment); // +1 to +(alignment-1) bytes } } } else #endif // FEATURE_SIMD && ALIGN_SIMD_TYPES { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = 7; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = 8 + (stkOffs % 8); // +1 to +7 bytes } } // Will the pad ever be anything except 4? Do we put smaller-than-4-sized objects on the stack? lvaIncrementFrameSize(pad); stkOffs -= pad; #ifdef DEBUG if (verbose) { printf("Pad "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x, pad=%d\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? -stkOffs : stkOffs, pad); } #endif } #endif // TARGET_64BIT /* Reserve space on the stack by bumping the frame size */ lvaIncrementFrameSize(size); stkOffs -= size; lvaTable[lclNum].SetStackOffset(stkOffs); #ifdef DEBUG if (verbose) { printf("Assign "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? 
-stkOffs : stkOffs); } #endif return stkOffs; } #ifdef TARGET_AMD64 /***************************************************************************** * lvaIsCalleeSavedIntRegCountEven() : returns true if the number of integer registers * pushed onto stack is even including RBP if used as frame pointer * * Note that this excludes return address (PC) pushed by caller. To know whether * the SP offset after pushing integer registers is aligned, we need to negate * the result of this routine. */ bool Compiler::lvaIsCalleeSavedIntRegCountEven() { unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); return (regsPushed % (16 / REGSIZE_BYTES)) == 0; } #endif // TARGET_AMD64 /***************************************************************************** * lvaAlignFrame() : After allocating everything on the frame, reserve any * extra space needed to keep the frame aligned */ void Compiler::lvaAlignFrame() { #if defined(TARGET_AMD64) // Leaf frames do not need full alignment, but the unwind info is smaller if we // are at least 8 byte aligned (and we assert as much) if ((compLclFrameSize % 8) != 0) { lvaIncrementFrameSize(8 - (compLclFrameSize % 8)); } else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add 8 so compLclFrameSize is still a multiple of 8. lvaIncrementFrameSize(8); } assert((compLclFrameSize % 8) == 0); // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD // if needed, but off by 8 because of the return address. // And don't forget that compCalleeRegsPushed does *not* include RBP if we are // using it as the frame pointer. // bool regPushedCountAligned = lvaIsCalleeSavedIntRegCountEven(); bool lclFrameSizeAligned = (compLclFrameSize % 16) == 0; // If this isn't the final frame layout, assume we have to push an extra QWORD // Just so the offsets are true upper limits. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI // The compNeedToAlignFrame flag indicates whether there is a need to align the frame. // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for // FastTailCall. These slots make the frame size non-zero, so alignment logic will be called. // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of 0. // The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering that there // are calls and making sure the frame alignment logic is executed. bool stackNeedsAlignment = (compLclFrameSize != 0 || opts.compNeedToAlignFrame); #else // !UNIX_AMD64_ABI bool stackNeedsAlignment = compLclFrameSize != 0; #endif // !UNIX_AMD64_ABI if ((!codeGen->isFramePointerUsed() && (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)) || (stackNeedsAlignment && (regPushedCountAligned == lclFrameSizeAligned))) { lvaIncrementFrameSize(REGSIZE_BYTES); } #elif defined(TARGET_ARM64) // The stack on ARM64 must be 16 byte aligned. // First, align up to 8. if ((compLclFrameSize % 8) != 0) { lvaIncrementFrameSize(8 - (compLclFrameSize % 8)); } else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add 8 so compLclFrameSize is still a multiple of 8.
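// Illustrative arithmetic (invented sizes): a tentative compLclFrameSize of 0x1C is rounded up
// by 8 - (0x1C % 8) == 4 to 0x20 by the branch above; when the size is already a multiple of 8
// before final layout, the unconditional extra 8 below keeps the tentative offsets a safe
// upper bound on the final ones.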
lvaIncrementFrameSize(8); } assert((compLclFrameSize % 8) == 0); // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD // if needed. bool regPushedCountAligned = (compCalleeRegsPushed % (16 / REGSIZE_BYTES)) == 0; bool lclFrameSizeAligned = (compLclFrameSize % 16) == 0; // If this isn't the final frame layout, assume we have to push an extra QWORD // Just so the offsets are true upper limits. if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || (regPushedCountAligned != lclFrameSizeAligned)) { lvaIncrementFrameSize(REGSIZE_BYTES); } #elif defined(TARGET_ARM) // Ensure that stack offsets will be double-aligned by grabbing an unused DWORD if needed. // bool lclFrameSizeAligned = (compLclFrameSize % sizeof(double)) == 0; bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) % (sizeof(double) / TARGET_POINTER_SIZE)) == 0; if (regPushedCountAligned != lclFrameSizeAligned) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); } #elif defined(TARGET_X86) #if DOUBLE_ALIGN if (genDoubleAlign()) { // Double Frame Alignment for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals() if (compLclFrameSize == 0) { // This can only happen with JitStress=1 or JitDoubleAlign=2 lvaIncrementFrameSize(TARGET_POINTER_SIZE); } } #endif if (STACK_ALIGN > REGSIZE_BYTES) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add the maximum pad that we could ever have (which is 12) lvaIncrementFrameSize(STACK_ALIGN - REGSIZE_BYTES); } // Align the stack with STACK_ALIGN value. int adjustFrameSize = compLclFrameSize; #if defined(UNIX_X86_ABI) bool isEbpPushed = codeGen->isFramePointerUsed(); #if DOUBLE_ALIGN isEbpPushed |= genDoubleAlign(); #endif // we need to consider spilled register(s) plus return address and/or EBP int adjustCount = compCalleeRegsPushed + 1 + (isEbpPushed ? 1 : 0); adjustFrameSize += (adjustCount * REGSIZE_BYTES) % STACK_ALIGN; #endif if ((adjustFrameSize % STACK_ALIGN) != 0) { lvaIncrementFrameSize(STACK_ALIGN - (adjustFrameSize % STACK_ALIGN)); } } #else NYI("TARGET specific lvaAlignFrame"); #endif // !TARGET_AMD64 } /***************************************************************************** * lvaAssignFrameOffsetsToPromotedStructs() : Assign offsets to fields * within a promoted struct (worker for lvaAssignFrameOffsets). */ void Compiler::lvaAssignFrameOffsetsToPromotedStructs() { LclVarDsc* varDsc = lvaTable; for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++, varDsc++) { // For promoted struct fields that are params, we will // assign their offsets in lvaAssignVirtualFrameOffsetToArg(). // This is not true for the System V systems since there is no // outgoing args space. Assign the dependently promoted fields properly. // CLANG_FORMAT_COMMENT_ANCHOR; #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) // ARM: lo/hi parts of a promoted long arg need to be updated. // // For System V platforms there is no outgoing args space. // // For System V and x86, a register passed struct arg is homed on the stack in a separate local var. // The offset of these structs is already calculated in the lvaAssignVirtualFrameOffsetToArg method. // Make sure the code below is not executed for these structs and the offset is not changed. // const bool mustProcessParams = true; #else // OSR must also assign offsets here.
// const bool mustProcessParams = opts.IsOSR(); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) if (varDsc->lvIsStructField && (!varDsc->lvIsParam || mustProcessParams)) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); if (promotionType == PROMOTION_TYPE_INDEPENDENT) { // The stack offset for these field locals must have been calculated // by the normal frame offset assignment. continue; } else { noway_assert(promotionType == PROMOTION_TYPE_DEPENDENT); noway_assert(varDsc->lvOnFrame); if (parentvarDsc->lvOnFrame) { JITDUMP("Adjusting offset of dependent V%02u of V%02u: parent %u field %u net %u\n", lclNum, varDsc->lvParentLcl, parentvarDsc->GetStackOffset(), varDsc->lvFldOffset, parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); varDsc->SetStackOffset(parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); } else { varDsc->lvOnFrame = false; noway_assert(varDsc->lvRefCnt() == 0); } } } } } /***************************************************************************** * lvaAllocateTemps() : Assign virtual offsets to temps (always negative). */ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) { unsigned spillTempSize = 0; if (lvaDoneFrameLayout == FINAL_FRAME_LAYOUT) { int preSpillSize = 0; #ifdef TARGET_ARM preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * TARGET_POINTER_SIZE; #endif /* Allocate temps */ assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { var_types tempType = temp->tdTempType(); unsigned size = temp->tdTempSize(); /* Figure out and record the stack offset of the temp */ /* Need to align the offset? */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0)) { // Calculate 'pad' as the number of bytes to align up 'stkOffs' to be a multiple of TARGET_POINTER_SIZE // In practice this is really just a fancy way of writing 4. (as all stack locations are at least 4-byte // aligned). Note stkOffs is always negative, so (stkOffs % TARGET_POINTER_SIZE) yields a negative // value. // int alignPad = (int)AlignmentPad((unsigned)-stkOffs, TARGET_POINTER_SIZE); spillTempSize += alignPad; lvaIncrementFrameSize(alignPad); stkOffs -= alignPad; noway_assert((stkOffs % TARGET_POINTER_SIZE) == 0); } #endif if (mustDoubleAlign && (tempType == TYP_DOUBLE)) // Align doubles for x86 and ARM { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { spillTempSize += TARGET_POINTER_SIZE; lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } spillTempSize += size; lvaIncrementFrameSize(size); stkOffs -= size; temp->tdSetTempOffs(stkOffs); } #ifdef TARGET_ARM // An accurate estimate for spillTempSize is only required on the ARM platform noway_assert(spillTempSize <= lvaGetMaxSpillTempSize()); #endif } else // We haven't run codegen, so there are no Spill temps yet! { unsigned size = lvaGetMaxSpillTempSize(); lvaIncrementFrameSize(size); stkOffs -= size; } return stkOffs; } #ifdef DEBUG /***************************************************************************** * * Dump the register a local is in right now.
It is only the current location, since the location changes and it * is updated throughout code generation based on LSRA register assignments. */ void Compiler::lvaDumpRegLocation(unsigned lclNum) { const LclVarDsc* varDsc = lvaGetDesc(lclNum); #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_DOUBLE) { // The assigned registers are `lvRegNum:RegNext(lvRegNum)` printf("%3s:%-3s ", getRegName(varDsc->GetRegNum()), getRegName(REG_NEXT(varDsc->GetRegNum()))); } else #endif // TARGET_ARM { printf("%3s ", getRegName(varDsc->GetRegNum())); } } /***************************************************************************** * * Dump the frame location assigned to a local. * It's the home location, even though the variable doesn't always live * in its home location. */ void Compiler::lvaDumpFrameLocation(unsigned lclNum) { int offset; regNumber baseReg; #ifdef TARGET_ARM offset = lvaFrameAddress(lclNum, compLocallocUsed, &baseReg, 0, /* isFloatUsage */ false); #else bool EBPbased; offset = lvaFrameAddress(lclNum, &EBPbased); baseReg = EBPbased ? REG_FPBASE : REG_SPBASE; #endif printf("[%2s%1s%02XH] ", getRegName(baseReg), (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } /***************************************************************************** * * dump a single lvaTable entry */ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth) { LclVarDsc* varDsc = lvaGetDesc(lclNum); var_types type = varDsc->TypeGet(); if (curState == INITIAL_FRAME_LAYOUT) { printf("; "); gtDispLclVar(lclNum); printf(" %7s ", varTypeName(type)); gtDispLclVarStructType(lclNum); } else { if (varDsc->lvRefCnt() == 0) { // Print this with a special indicator that the variable is unused. Even though the // variable itself is unused, it might be a struct that is promoted, so seeing it // can be useful when looking at the promoted struct fields. It's also weird to see // missing var numbers if these aren't printed. printf(";* "); } #if FEATURE_FIXED_OUT_ARGS // Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until // after we set it to something. else if ((lclNum == lvaOutgoingArgSpaceVar) && lvaOutgoingArgSpaceSize.HasFinalValue() && (lvaOutgoingArgSpaceSize == 0)) { // Similar to above; print this anyway. printf(";# "); } #endif // FEATURE_FIXED_OUT_ARGS else { printf("; "); } gtDispLclVar(lclNum); printf("[V%02u", lclNum); if (varDsc->lvTracked) { printf(",T%02u]", varDsc->lvVarIndex); } else { printf(" ]"); } printf(" (%3u,%*s)", varDsc->lvRefCnt(), (int)refCntWtdWidth, refCntWtd2str(varDsc->lvRefCntWtd())); printf(" %7s ", varTypeName(type)); if (genTypeSize(type) == 0) { printf("(%2d) ", lvaLclSize(lclNum)); } else { printf(" -> "); } // The register or stack location field is 11 characters wide. if ((varDsc->lvRefCnt() == 0) && !varDsc->lvImplicitlyReferenced) { printf("zero-ref "); } else if (varDsc->lvRegister != 0) { // It's always a register, and always in the same register. lvaDumpRegLocation(lclNum); } else if (varDsc->lvOnFrame == 0) { printf("registers "); } else { // For RyuJIT backend, it might be in a register part of the time, but it will definitely have a stack home // location. Otherwise, it's always on the stack. 
if (lvaDoneFrameLayout != NO_FRAME_LAYOUT) { lvaDumpFrameLocation(lclNum); } } } if (varDsc->lvIsHfa()) { printf(" HFA(%s) ", varTypeName(varDsc->GetHfaType())); } if (varDsc->lvDoNotEnregister) { printf(" do-not-enreg["); if (varDsc->IsAddressExposed()) { printf("X"); } if (varTypeIsStruct(varDsc)) { printf("S"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::VMNeedsStackAddr) { printf("V"); } if (lvaEnregEHVars && varDsc->lvLiveInOutOfHndlr) { printf("%c", varDsc->lvSingleDefDisqualifyReason); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::LocalField) { printf("F"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::BlockOp) { printf("B"); } if (varDsc->lvIsMultiRegArg) { printf("A"); } if (varDsc->lvIsMultiRegRet) { printf("R"); } #ifdef JIT32_GCENCODER if (varDsc->lvPinned) printf("P"); #endif // JIT32_GCENCODER printf("]"); } if (varDsc->lvIsMultiRegArg) { printf(" multireg-arg"); } if (varDsc->lvIsMultiRegRet) { printf(" multireg-ret"); } if (varDsc->lvMustInit) { printf(" must-init"); } if (varDsc->IsAddressExposed()) { printf(" addr-exposed"); } if (varDsc->lvHasLdAddrOp) { printf(" ld-addr-op"); } if (varDsc->lvVerTypeInfo.IsThisPtr()) { printf(" this"); } if (varDsc->lvPinned) { printf(" pinned"); } if (varDsc->lvStackByref) { printf(" stack-byref"); } if (varDsc->lvClassHnd != NO_CLASS_HANDLE) { printf(" class-hnd"); } if (varDsc->lvClassIsExact) { printf(" exact"); } if (varDsc->lvLiveInOutOfHndlr) { printf(" EH-live"); } if (varDsc->lvSpillAtSingleDef) { printf(" spill-single-def"); } else if (varDsc->lvSingleDefRegCandidate) { printf(" single-def"); } if (lvaIsOSRLocal(lclNum) && varDsc->lvOnFrame) { printf(" tier0-frame"); } #ifndef TARGET_64BIT if (varDsc->lvStructDoubleAlign) printf(" double-align"); #endif // !TARGET_64BIT if (varDsc->lvOverlappingFields) { printf(" overlapping-fields"); } if (compGSReorderStackLayout && !varDsc->lvRegister) { if (varDsc->lvIsPtr) { printf(" ptr"); } if (varDsc->lvIsUnsafeBuffer) { printf(" unsafe-buffer"); } } if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); #if !defined(TARGET_64BIT) if (varTypeIsLong(parentvarDsc)) { bool isLo = (lclNum == parentvarDsc->lvFieldLclStart); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, isLo ? "lo" : "hi", isLo ? 0 : genTypeSize(TYP_INT)); } else #endif // !defined(TARGET_64BIT) { CORINFO_CLASS_HANDLE typeHnd = parentvarDsc->GetStructHnd(); CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(typeHnd, varDsc->lvFldOrdinal); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, eeGetFieldName(fldHnd), varDsc->lvFldOffset); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); switch (promotionType) { case PROMOTION_TYPE_NONE: printf(" P-NONE"); break; case PROMOTION_TYPE_DEPENDENT: printf(" P-DEP"); break; case PROMOTION_TYPE_INDEPENDENT: printf(" P-INDEP"); break; } } } if (varDsc->lvReason != nullptr) { printf(" \"%s\"", varDsc->lvReason); } printf("\n"); } /***************************************************************************** * * dump the lvaTable */ void Compiler::lvaTableDump(FrameLayoutState curState) { if (curState == NO_FRAME_LAYOUT) { curState = lvaDoneFrameLayout; if (curState == NO_FRAME_LAYOUT) { // Still no layout? 
Could be a bug, but just display the initial layout curState = INITIAL_FRAME_LAYOUT; } } if (curState == INITIAL_FRAME_LAYOUT) { printf("; Initial"); } else if (curState == PRE_REGALLOC_FRAME_LAYOUT) { printf("; Pre-RegAlloc"); } else if (curState == REGALLOC_FRAME_LAYOUT) { printf("; RegAlloc"); } else if (curState == TENTATIVE_FRAME_LAYOUT) { printf("; Tentative"); } else if (curState == FINAL_FRAME_LAYOUT) { printf("; Final"); } else { printf("UNKNOWN FrameLayoutState!"); unreached(); } printf(" local variable assignments\n"); printf(";\n"); unsigned lclNum; LclVarDsc* varDsc; // Figure out some sizes, to help line things up size_t refCntWtdWidth = 6; // Use 6 as the minimum width if (curState != INITIAL_FRAME_LAYOUT) // don't need this info for INITIAL_FRAME_LAYOUT { for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { size_t width = strlen(refCntWtd2str(varDsc->lvRefCntWtd())); if (width > refCntWtdWidth) { refCntWtdWidth = width; } } } // Do the actual output for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { lvaDumpEntry(lclNum, curState, refCntWtdWidth); } //------------------------------------------------------------------------- // Display the code-gen temps assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { printf("; TEMP_%02u %26s%*s%7s -> ", -temp->tdTempNum(), " ", refCntWtdWidth, " ", varTypeName(temp->tdTempType())); int offset = temp->tdTempOffs(); printf(" [%2s%1s0x%02X]\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE, (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } if (curState >= TENTATIVE_FRAME_LAYOUT) { printf(";\n"); printf("; Lcl frame size = %d\n", compLclFrameSize); } } #endif // DEBUG /***************************************************************************** * * Conservatively estimate the layout of the stack frame. * * This function is only used before final frame layout. It conservatively estimates the * number of callee-saved registers that must be saved, then calls lvaAssignFrameOffsets(). * To do final frame layout, the callee-saved registers are known precisely, so * lvaAssignFrameOffsets() is called directly. * * Returns the (conservative, that is, overly large) estimated size of the frame, * including the callee-saved registers. This is only used by the emitter during code * generation when estimating the size of the offset of instructions accessing temps, * and only if temps have a larger offset than variables. */ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) { assert(curState < FINAL_FRAME_LAYOUT); unsigned result; /* Layout the stack frame conservatively. Assume all callee-saved registers are spilled to stack */ compCalleeRegsPushed = CNT_CALLEE_SAVED; #if defined(TARGET_ARMARCH) if (compFloatingPointUsed) compCalleeRegsPushed += CNT_CALLEE_SAVED_FLOAT; compCalleeRegsPushed++; // we always push LR. See genPushCalleeSavedRegisters #elif defined(TARGET_AMD64) if (compFloatingPointUsed) { compCalleeFPRegsSavedMask = RBM_FLT_CALLEE_SAVED; } else { compCalleeFPRegsSavedMask = RBM_NONE; } #endif #if DOUBLE_ALIGN if (genDoubleAlign()) { // X86 only - account for extra 4-byte pad that may be created by "and esp, -8" instruction compCalleeRegsPushed++; } #endif #ifdef TARGET_XARCH // Since FP/EBP is included in the SAVED_REG_MAXSZ we need to // subtract 1 register if codeGen->isFramePointerUsed() is true. 
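// Illustrative reading (x64 with a frame pointer): CNT_CALLEE_SAVED counts RBP among the
// callee-saved registers, so when RBP is used as the frame pointer it is removed from the
// conservative count here to avoid counting it twice.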
if (codeGen->isFramePointerUsed()) { compCalleeRegsPushed--; } #endif lvaAssignFrameOffsets(curState); unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; #if defined(TARGET_ARMARCH) if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. See genPushCalleeSavedRegisters #endif result = compLclFrameSize + calleeSavedRegMaxSz; return result; } //------------------------------------------------------------------------ // lvaGetSPRelativeOffset: Given a variable, return the offset of that // variable in the frame from the stack pointer. This number will be positive, // since the stack pointer must be at a lower address than everything on the // stack. // // This can't be called for localloc functions, since the stack pointer // varies, and thus there is no fixed offset to a variable from the stack pointer. // // Arguments: // varNum - the variable number // // Return Value: // The offset. int Compiler::lvaGetSPRelativeOffset(unsigned varNum) { assert(!compLocallocUsed); assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); int spRelativeOffset; if (varDsc->lvFramePointerBased) { // The stack offset is relative to the frame pointer, so convert it to be // relative to the stack pointer (which makes no sense for localloc functions). spRelativeOffset = varDsc->GetStackOffset() + codeGen->genSPtoFPdelta(); } else { spRelativeOffset = varDsc->GetStackOffset(); } assert(spRelativeOffset >= 0); return spRelativeOffset; } /***************************************************************************** * * Return the caller-SP-relative stack offset of a local/parameter. * Requires the local to be on the stack and frame layout to be complete. */ int Compiler::lvaGetCallerSPRelativeOffset(unsigned varNum) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); return lvaToCallerSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased); } //----------------------------------------------------------------------------- // lvaToCallerSPRelativeOffset: translate a frame offset into an offset from // the caller's stack pointer. // // Arguments: // offset - frame offset // isFpBase - if true, offset is from FP, otherwise offset is from SP // forRootFrame - if the current method is an OSR method, adjust the offset // to be relative to the SP for the root method, instead of being relative // to the SP for the OSR method. // // Returns: // suitable offset // int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRootFrame) const { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); if (isFpBased) { offset += codeGen->genCallerSPtoFPdelta(); } else { offset += codeGen->genCallerSPtoInitialSPdelta(); } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) if (forRootFrame && opts.IsOSR()) { const PatchpointInfo* const ppInfo = info.compPatchpointInfo; #if defined(TARGET_AMD64) // The offset computed above already includes the OSR frame adjustment, plus the // pop of the "pseudo return address" from the OSR frame. // // To get to root method caller-SP, we need to subtract off the tier0 frame // size and the pushed return address and RBP for the tier0 frame (which we know is an // RBP frame). // // ppInfo's TotalFrameSize also accounts for the popped pseudo return address // between the tier0 method frame and the OSR frame.
        // So the net adjustment
        // is simply TotalFrameSize plus one register.
        //
        const int adjustment = ppInfo->TotalFrameSize() + REGSIZE_BYTES;

#elif defined(TARGET_ARM64)
        const int adjustment = ppInfo->TotalFrameSize();
#endif

        offset -= adjustment;
    }
#else
    // OSR NYI for other targets.
    assert(!opts.IsOSR());
#endif

    return offset;
}

/*****************************************************************************
 *
 *  Return the Initial-SP-relative stack offset of a local/parameter.
 *  Requires the local to be on the stack and frame layout to be complete.
 */

int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum)
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvOnFrame);

    return lvaToInitialSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased);
}

// Given a local variable offset, and whether that offset is frame-pointer based, return its offset from Initial-SP.
// This is used, for example, to figure out the offset of the frame pointer from Initial-SP.
int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased)
{
    assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT);
#ifdef TARGET_AMD64
    if (isFpBased)
    {
        // Currently, the frame starts by pushing ebp, ebp points to the saved ebp
        // (so we have ebp pointer chaining). Add the fixed-size frame size plus the
        // size of the callee-saved regs (not including ebp itself) to find Initial-SP.

        assert(codeGen->isFramePointerUsed());
        offset += codeGen->genSPtoFPdelta();
    }
    else
    {
        // The offset is correct already!
    }
#else  // !TARGET_AMD64
    NYI("lvaToInitialSPRelativeOffset");
#endif // !TARGET_AMD64

    return offset;
}

/*****************************************************************************/

#ifdef DEBUG

/*****************************************************************************
 *  Pick a padding size at "random" for the local.
 *  0 means that it should not be converted to a GT_LCL_FLD
 */

static unsigned LCL_FLD_PADDING(unsigned lclNum)
{
    // Convert every 2nd variable
    if (lclNum % 2)
    {
        return 0;
    }

    // Pick a padding size at "random"
    unsigned size = lclNum % 7;

    return size;
}

/*****************************************************************************
 *
 *  Callback for fgWalkAllTreesPre()
 *  Convert as many GT_LCL_VAR's to GT_LCL_FLD's
 */

/* static */
/*
    The stress mode does 2 passes.

    In the first pass we will mark the locals where we CAN'T apply the stress mode.

    In the second pass we will do the appropriate morphing wherever we've not
    determined we can't do it.
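    (Two passes are needed because the decision is per-local rather than per-appearance:
    a reason to reject the stress mode found at one appearance of a local must suppress
    the morphing at every appearance, including ones walked earlier.)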
*/ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; genTreeOps oper = tree->OperGet(); GenTree* lcl; switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: lcl = tree; break; case GT_ADDR: if (tree->AsOp()->gtOp1->gtOper != GT_LCL_VAR) { return WALK_CONTINUE; } lcl = tree->AsOp()->gtOp1; break; default: return WALK_CONTINUE; } noway_assert(lcl->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR)); Compiler* const pComp = ((lvaStressLclFldArgs*)data->pCallbackData)->m_pCompiler; const bool bFirstPass = ((lvaStressLclFldArgs*)data->pCallbackData)->m_bFirstPass; const unsigned lclNum = lcl->AsLclVarCommon()->GetLclNum(); var_types type = lcl->TypeGet(); LclVarDsc* const varDsc = pComp->lvaGetDesc(lclNum); if (varDsc->lvNoLclFldStress) { // Already determined we can't do anything for this var return WALK_SKIP_SUBTREES; } if (bFirstPass) { // Ignore arguments and temps if (varDsc->lvIsParam || lclNum >= pComp->info.compLocalsCount) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Ignore OSR locals; if in memory, they will live on the // Tier0 frame and so can't have their storage adjusted. // if (pComp->lvaIsOSRLocal(lclNum)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Likewise for Tier0 methods with patchpoints -- // if we modify them we'll misreport their locations in the patchpoint info. // if (pComp->doesMethodHavePatchpoints() || pComp->doesMethodHavePartialCompilationPatchpoints()) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Fix for lcl_fld stress mode if (varDsc->lvKeepType) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Can't have GC ptrs in TYP_BLK. if (!varTypeIsArithmetic(type)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // The noway_assert in the second pass below, requires that these types match, or we have a TYP_BLK // if ((varDsc->lvType != lcl->gtType) && (varDsc->lvType != TYP_BLK)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Weed out "small" types like TYP_BYTE as we don't mark the GT_LCL_VAR // node with the accurate small type. If we bash lvaTable[].lvType, // then there will be no indication that it was ever a small type. 
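        // (For example, a TYP_BYTE local usually appears as TYP_INT on its GT_LCL_VAR
        // uses; once lvType is bashed to TYP_BLK in the second pass, the original
        // 1-byte size would be unrecoverable.)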
var_types varType = varDsc->TypeGet(); if (varType != TYP_BLK && genTypeSize(varType) != genTypeSize(genActualType(varType))) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Offset some of the local variable by a "random" non-zero amount unsigned padding = LCL_FLD_PADDING(lclNum); if (padding == 0) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } } else { // Do the morphing noway_assert((varDsc->lvType == lcl->gtType) || (varDsc->lvType == TYP_BLK)); var_types varType = varDsc->TypeGet(); // Calculate padding unsigned padding = LCL_FLD_PADDING(lclNum); #ifdef TARGET_ARMARCH // We need to support alignment requirements to access memory on ARM ARCH unsigned alignment = 1; pComp->codeGen->InferOpSizeAlign(lcl, &alignment); alignment = roundUp(alignment, TARGET_POINTER_SIZE); padding = roundUp(padding, alignment); #endif // TARGET_ARMARCH // Change the variable to a TYP_BLK if (varType != TYP_BLK) { varDsc->lvExactSize = roundUp(padding + pComp->lvaLclSize(lclNum), TARGET_POINTER_SIZE); varDsc->lvType = TYP_BLK; pComp->lvaSetVarAddrExposed(lclNum DEBUGARG(AddressExposedReason::STRESS_LCL_FLD)); } tree->gtFlags |= GTF_GLOB_REF; /* Now morph the tree appropriately */ if (oper == GT_LCL_VAR) { /* Change lclVar(lclNum) to lclFld(lclNum,padding) */ tree->ChangeOper(GT_LCL_FLD); tree->AsLclFld()->SetLclOffs(padding); } else if (oper == GT_LCL_VAR_ADDR) { tree->ChangeOper(GT_LCL_FLD_ADDR); tree->AsLclFld()->SetLclOffs(padding); } else { /* Change addr(lclVar) to addr(lclVar)+padding */ noway_assert(oper == GT_ADDR); GenTree* paddingTree = pComp->gtNewIconNode(padding); GenTree* newAddr = pComp->gtNewOperNode(GT_ADD, tree->gtType, tree, paddingTree); *pTree = newAddr; lcl->gtType = TYP_BLK; } } return WALK_SKIP_SUBTREES; } /*****************************************************************************/ void Compiler::lvaStressLclFld() { if (!compStressCompile(STRESS_LCL_FLDS, 5)) { return; } lvaStressLclFldArgs Args; Args.m_pCompiler = this; Args.m_bFirstPass = true; // Do First pass fgWalkAllTreesPre(lvaStressLclFldCB, &Args); // Second pass Args.m_bFirstPass = false; fgWalkAllTreesPre(lvaStressLclFldCB, &Args); } #endif // DEBUG /***************************************************************************** * * A little routine that displays a local variable bitset. * 'set' is mask of variables that have to be displayed * 'allVars' is the complete set of interesting variables (blank space is * inserted if its corresponding bit is not in 'set'). */ #ifdef DEBUG void Compiler::lvaDispVarSet(VARSET_VALARG_TP set) { VARSET_TP allVars(VarSetOps::MakeEmpty(this)); lvaDispVarSet(set, allVars); } void Compiler::lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars) { printf("{"); bool needSpace = false; for (unsigned index = 0; index < lvaTrackedCount; index++) { if (VarSetOps::IsMember(this, set, index)) { unsigned lclNum; LclVarDsc* varDsc; /* Look for the matching variable */ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { if ((varDsc->lvVarIndex == index) && varDsc->lvTracked) { break; } } if (needSpace) { printf(" "); } else { needSpace = true; } printf("V%02u", lclNum); } else if (VarSetOps::IsMember(this, allVars, index)) { if (needSpace) { printf(" "); } else { needSpace = true; } printf(" "); } } printf("}"); } #endif // DEBUG
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX LclVarsInfo XX XX XX XX The variables to be used by the code generator. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" #include "register_arg_convention.h" #include "jitstd/algorithm.h" #include "patchpointinfo.h" /*****************************************************************************/ #ifdef DEBUG #if DOUBLE_ALIGN /* static */ unsigned Compiler::s_lvaDoubleAlignedProcsCount = 0; #endif #endif /*****************************************************************************/ void Compiler::lvaInit() { /* We haven't allocated stack variables yet */ lvaRefCountState = RCS_INVALID; lvaGenericsContextInUse = false; lvaTrackedToVarNumSize = 0; lvaTrackedToVarNum = nullptr; lvaTrackedFixed = false; // false: We can still add new tracked variables lvaDoneFrameLayout = NO_FRAME_LAYOUT; #if !defined(FEATURE_EH_FUNCLETS) lvaShadowSPslotsVar = BAD_VAR_NUM; #endif // !FEATURE_EH_FUNCLETS lvaInlinedPInvokeFrameVar = BAD_VAR_NUM; lvaReversePInvokeFrameVar = BAD_VAR_NUM; #if FEATURE_FIXED_OUT_ARGS lvaPInvokeFrameRegSaveVar = BAD_VAR_NUM; lvaOutgoingArgSpaceVar = BAD_VAR_NUM; lvaOutgoingArgSpaceSize = PhasedVar<unsigned>(); #endif // FEATURE_FIXED_OUT_ARGS #ifdef JIT32_GCENCODER lvaLocAllocSPvar = BAD_VAR_NUM; #endif // JIT32_GCENCODER lvaNewObjArrayArgs = BAD_VAR_NUM; lvaGSSecurityCookie = BAD_VAR_NUM; #ifdef TARGET_X86 lvaVarargsBaseOfStkArgs = BAD_VAR_NUM; #endif // TARGET_X86 lvaVarargsHandleArg = BAD_VAR_NUM; lvaStubArgumentVar = BAD_VAR_NUM; lvaArg0Var = BAD_VAR_NUM; lvaMonAcquired = BAD_VAR_NUM; lvaRetAddrVar = BAD_VAR_NUM; lvaInlineeReturnSpillTemp = BAD_VAR_NUM; gsShadowVarInfo = nullptr; #if defined(FEATURE_EH_FUNCLETS) lvaPSPSym = BAD_VAR_NUM; #endif #if FEATURE_SIMD lvaSIMDInitTempVarNum = BAD_VAR_NUM; #endif // FEATURE_SIMD lvaCurEpoch = 0; structPromotionHelper = new (this, CMK_Generic) StructPromotionHelper(this); } /*****************************************************************************/ void Compiler::lvaInitTypeRef() { /* x86 args look something like this: [this ptr] [hidden return buffer] [declared arguments]* [generic context] [var arg cookie] x64 is closer to the native ABI: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* (Note: prior to .NET Framework 4.5.1 for Windows 8.1 (but not .NET Framework 4.5.1 "downlevel"), the "hidden return buffer" came before the "this ptr". Now, the "this ptr" comes first. This is different from the C++ order, where the "hidden return buffer" always comes first.) 
ARM and ARM64 are the same as the current x64 convention: [this ptr] [hidden return buffer] [generic context] [var arg cookie] [declared arguments]* Key difference: The var arg cookie and generic context are swapped with respect to the user arguments */ /* Set compArgsCount and compLocalsCount */ info.compArgsCount = info.compMethodInfo->args.numArgs; // Is there a 'this' pointer if (!info.compIsStatic) { info.compArgsCount++; } else { info.compThisArg = BAD_VAR_NUM; } info.compILargsCount = info.compArgsCount; #ifdef FEATURE_SIMD if (supportSIMDTypes() && (info.compRetNativeType == TYP_STRUCT)) { var_types structType = impNormStructType(info.compMethodInfo->args.retTypeClass); info.compRetType = structType; } #endif // FEATURE_SIMD // Are we returning a struct using a return buffer argument? // const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // Possibly change the compRetNativeType from TYP_STRUCT to a "primitive" type // when we are returning a struct by value and it fits in one register // if (!hasRetBuffArg && varTypeIsStruct(info.compRetNativeType)) { CORINFO_CLASS_HANDLE retClsHnd = info.compMethodInfo->args.retTypeClass; Compiler::structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, info.compCallConv, &howToReturnStruct); // We can safely widen the return type for enclosed structs. if ((howToReturnStruct == SPK_PrimitiveType) || (howToReturnStruct == SPK_EnclosingType)) { assert(returnType != TYP_UNKNOWN); assert(returnType != TYP_STRUCT); info.compRetNativeType = returnType; // ToDo: Refactor this common code sequence into its own method as it is used 4+ times if ((returnType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } } // Do we have a RetBuffArg? if (hasRetBuffArg) { info.compArgsCount++; } else { info.compRetBuffArg = BAD_VAR_NUM; } /* There is a 'hidden' cookie pushed last when the calling convention is varargs */ if (info.compIsVarArgs) { info.compArgsCount++; } // Is there an extra parameter used to pass instantiation info to // shared generic methods and shared generic struct instance methods? if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compArgsCount++; } else { info.compTypeCtxtArg = BAD_VAR_NUM; } lvaCount = info.compLocalsCount = info.compArgsCount + info.compMethodInfo->locals.numArgs; info.compILlocalsCount = info.compILargsCount + info.compMethodInfo->locals.numArgs; /* Now allocate the variable descriptor table */ if (compIsForInlining()) { lvaTable = impInlineInfo->InlinerCompiler->lvaTable; lvaCount = impInlineInfo->InlinerCompiler->lvaCount; lvaTableCnt = impInlineInfo->InlinerCompiler->lvaTableCnt; // No more stuff needs to be done. return; } lvaTableCnt = lvaCount * 2; if (lvaTableCnt < 16) { lvaTableCnt = 16; } lvaTable = getAllocator(CMK_LvaTable).allocate<LclVarDsc>(lvaTableCnt); size_t tableSize = lvaTableCnt * sizeof(*lvaTable); memset(lvaTable, 0, tableSize); for (unsigned i = 0; i < lvaTableCnt; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. 
} //------------------------------------------------------------------------- // Count the arguments and initialize the respective lvaTable[] entries // // First the implicit arguments //------------------------------------------------------------------------- InitVarDscInfo varDscInfo; #ifdef TARGET_X86 // x86 unmanaged calling conventions limit the number of registers supported // for accepting arguments. As a result, we need to modify the number of registers // when we emit a method with an unmanaged calling convention. switch (info.compCallConv) { case CorInfoCallConvExtension::Thiscall: // In thiscall the this parameter goes into a register. varDscInfo.Init(lvaTable, hasRetBuffArg, 1, 0); break; case CorInfoCallConvExtension::C: case CorInfoCallConvExtension::Stdcall: case CorInfoCallConvExtension::CMemberFunction: case CorInfoCallConvExtension::StdcallMemberFunction: varDscInfo.Init(lvaTable, hasRetBuffArg, 0, 0); break; case CorInfoCallConvExtension::Managed: case CorInfoCallConvExtension::Fastcall: case CorInfoCallConvExtension::FastcallMemberFunction: default: varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); break; } #else varDscInfo.Init(lvaTable, hasRetBuffArg, MAX_REG_ARG, MAX_FLOAT_REG_ARG); #endif lvaInitArgs(&varDscInfo); //------------------------------------------------------------------------- // Finally the local variables //------------------------------------------------------------------------- unsigned varNum = varDscInfo.varNum; LclVarDsc* varDsc = varDscInfo.varDsc; CORINFO_ARG_LIST_HANDLE localsSig = info.compMethodInfo->locals.args; #ifdef TARGET_ARM compHasSplitParam = varDscInfo.hasSplitParam; #endif for (unsigned i = 0; i < info.compMethodInfo->locals.numArgs; i++, varNum++, varDsc++, localsSig = info.compCompHnd->getArgNext(localsSig)) { CORINFO_CLASS_HANDLE typeHnd; CorInfoTypeWithMod corInfoTypeWithMod = info.compCompHnd->getArgType(&info.compMethodInfo->locals, localsSig, &typeHnd); CorInfoType corInfoType = strip(corInfoTypeWithMod); lvaInitVarDsc(varDsc, varNum, corInfoType, typeHnd, localsSig, &info.compMethodInfo->locals); if ((corInfoTypeWithMod & CORINFO_TYPE_MOD_PINNED) != 0) { if ((corInfoType == CORINFO_TYPE_CLASS) || (corInfoType == CORINFO_TYPE_BYREF)) { JITDUMP("Setting lvPinned for V%02u\n", varNum); varDsc->lvPinned = 1; } else { JITDUMP("Ignoring pin for non-GC type V%02u\n", varNum); } } varDsc->lvOnFrame = true; // The final home for this local variable might be our local stack frame if (corInfoType == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->locals, localsSig); lvaSetClass(varNum, clsHnd); } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varNum); varDsc->lvHasLdAddrOp = 1; // todo: Why does it apply only to non-structs? // if (!varTypeIsStruct(varDsc) && !varTypeIsSIMD(varDsc)) { lvaSetVarAddrExposed(varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } } if ( // If there already exist unsafe buffers, don't mark more structs as unsafe // as that will cause them to be placed along with the real unsafe buffers, // unnecessarily exposing them to overruns. This can affect GS tests which // intentionally do buffer-overruns. 
!getNeedsGSSecurityCookie() && // GS checks require the stack to be re-ordered, which can't be done with EnC !opts.compDbgEnC && compStressCompile(STRESS_UNSAFE_BUFFER_CHECKS, 25)) { setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; for (unsigned i = 0; i < lvaCount; i++) { if ((lvaTable[i].lvType == TYP_STRUCT) && compStressCompile(STRESS_GENERIC_VARN, 60)) { lvaTable[i].lvIsUnsafeBuffer = true; } } } if (getNeedsGSSecurityCookie()) { // Ensure that there will be at least one stack variable since // we require that the GSCookie does not have a 0 stack offset. unsigned dummy = lvaGrabTempWithImplicitUse(false DEBUGARG("GSCookie dummy")); LclVarDsc* gsCookieDummy = lvaGetDesc(dummy); gsCookieDummy->lvType = TYP_INT; gsCookieDummy->lvIsTemp = true; // It is not alive at all, set the flag to prevent zero-init. lvaSetVarDoNotEnregister(dummy DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } // Allocate the lvaOutgoingArgSpaceVar now because we can run into problems in the // emitter when the varNum is greater that 32767 (see emitLclVarAddr::initLclVarAddr) lvaAllocOutgoingArgSpaceVar(); #ifdef DEBUG if (verbose) { lvaTableDump(INITIAL_FRAME_LAYOUT); } #endif } /*****************************************************************************/ void Compiler::lvaInitArgs(InitVarDscInfo* varDscInfo) { compArgSize = 0; #if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED) // Prespill all argument regs on to stack in case of Arm when under profiler. if (compIsProfilerHookNeeded()) { codeGen->regSet.rsMaskPreSpillRegArg |= RBM_ARG_REGS; } #endif //---------------------------------------------------------------------- /* Is there a "this" pointer ? */ lvaInitThisPtr(varDscInfo); unsigned numUserArgsToSkip = 0; unsigned numUserArgs = info.compMethodInfo->args.numArgs; #if !defined(TARGET_ARM) if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { // If we are a native instance method, handle the first user arg // (the unmanaged this parameter) and then handle the hidden // return buffer parameter. 
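        // (That is, the incoming signature is laid out as
        //    [unmanaged this] [hidden return buffer] [remaining user args]*
        // so the unmanaged 'this' is initialized first, then the return buffer.)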
assert(numUserArgs >= 1); lvaInitUserArgs(varDscInfo, 0, 1); numUserArgsToSkip++; numUserArgs--; lvaInitRetBuffArg(varDscInfo, false); } else #endif { /* If we have a hidden return-buffer parameter, that comes here */ lvaInitRetBuffArg(varDscInfo, true); } //====================================================================== #if USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //------------------------------------------------------------------------- // Now walk the function signature for the explicit user arguments //------------------------------------------------------------------------- lvaInitUserArgs(varDscInfo, numUserArgsToSkip, numUserArgs); #if !USER_ARGS_COME_LAST //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods lvaInitGenericsCtxt(varDscInfo); /* If the method is varargs, process the varargs cookie */ lvaInitVarArgsHandle(varDscInfo); #endif //---------------------------------------------------------------------- // We have set info.compArgsCount in compCompile() noway_assert(varDscInfo->varNum == info.compArgsCount); assert(varDscInfo->intRegArgNum <= MAX_REG_ARG); codeGen->intRegState.rsCalleeRegArgCount = varDscInfo->intRegArgNum; codeGen->floatRegState.rsCalleeRegArgCount = varDscInfo->floatRegArgNum; #if FEATURE_FASTTAILCALL // Save the stack usage information // We can get register usage information using codeGen->intRegState and // codeGen->floatRegState info.compArgStackSize = varDscInfo->stackArgSize; #endif // FEATURE_FASTTAILCALL // The total argument size must be aligned. noway_assert((compArgSize % TARGET_POINTER_SIZE) == 0); #ifdef TARGET_X86 /* We can not pass more than 2^16 dwords as arguments as the "ret" instruction can only pop 2^16 arguments. 
Could be handled correctly but it will be very difficult for fully interruptible code */ if (compArgSize != (size_t)(unsigned short)compArgSize) IMPL_LIMITATION("Too many arguments for the \"ret\" instruction to pop"); #endif } /*****************************************************************************/ void Compiler::lvaInitThisPtr(InitVarDscInfo* varDscInfo) { LclVarDsc* varDsc = varDscInfo->varDsc; if (!info.compIsStatic) { varDsc->lvIsParam = 1; varDsc->lvIsPtr = 1; lvaArg0Var = info.compThisArg = varDscInfo->varNum; noway_assert(info.compThisArg == 0); if (eeIsValueClass(info.compClassHnd)) { varDsc->lvType = TYP_BYREF; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; var_types type = impNormStructType(info.compClassHnd, &simdBaseJitType); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(type)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); varDsc->lvExactSize = genTypeSize(type); } } #endif // FEATURE_SIMD } else { varDsc->lvType = TYP_REF; lvaSetClass(varDscInfo->varNum, info.compClassHnd); } varDsc->lvVerTypeInfo = typeInfo(); // Mark the 'this' pointer for the method varDsc->lvVerTypeInfo.SetIsThisPtr(); varDsc->lvIsRegArg = 1; noway_assert(varDscInfo->intRegArgNum == 0); varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->allocRegArg(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef DEBUG if (verbose) { printf("'this' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } /*****************************************************************************/ void Compiler::lvaInitRetBuffArg(InitVarDscInfo* varDscInfo, bool useFixedRetBufReg) { LclVarDsc* varDsc = varDscInfo->varDsc; bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(info.compMethodInfo, info.compCallConv); // These two should always match noway_assert(hasRetBuffArg == varDscInfo->hasRetBufArg); if (hasRetBuffArg) { info.compRetBuffArg = varDscInfo->varNum; varDsc->lvType = TYP_BYREF; varDsc->lvIsParam = 1; varDsc->lvIsRegArg = 0; if (useFixedRetBufReg && hasFixedRetBuffReg()) { varDsc->lvIsRegArg = 1; varDsc->SetArgReg(theFixedRetBuffReg()); } else if (varDscInfo->canEnreg(TYP_INT)) { varDsc->lvIsRegArg = 1; unsigned retBuffArgNum = varDscInfo->allocRegArg(TYP_INT); varDsc->SetArgReg(genMapIntRegArgNumToRegNum(retBuffArgNum)); } #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame #ifdef FEATURE_SIMD if (supportSIMDTypes() && varTypeIsSIMD(info.compRetType)) { varDsc->lvSIMDType = true; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(info.compMethodInfo->args.retTypeClass, &varDsc->lvExactSize); varDsc->SetSimdBaseJitType(simdBaseJitType); assert(varDsc->GetSimdBaseType() != TYP_UNKNOWN); } #endif // FEATURE_SIMD assert(!varDsc->lvIsRegArg || isValidIntArgReg(varDsc->GetArgReg())); #ifdef DEBUG if (varDsc->lvIsRegArg && verbose) { printf("'__retBuf' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif /* Update the total argument size, count and varDsc */ compArgSize += TARGET_POINTER_SIZE; varDscInfo->varNum++; varDscInfo->varDsc++; } } //----------------------------------------------------------------------------- // lvaInitUserArgs: // 
Initialize local var descriptions for incoming user arguments // // Arguments: // varDscInfo - the local var descriptions // skipArgs - the number of user args to skip processing. // takeArgs - the number of user args to process (after skipping skipArgs number of args) // void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, unsigned takeArgs) { //------------------------------------------------------------------------- // Walk the function signature for the explicit arguments //------------------------------------------------------------------------- #if defined(TARGET_X86) // Only (some of) the implicit args are enregistered for varargs if (info.compIsVarArgs) { varDscInfo->maxIntRegArgNum = varDscInfo->intRegArgNum; } #elif defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On System V type environment the float registers are not indexed together with the int ones. varDscInfo->floatRegArgNum = varDscInfo->intRegArgNum; #endif // TARGET* CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; const unsigned argSigLen = info.compMethodInfo->args.numArgs; // We will process at most takeArgs arguments from the signature after skipping skipArgs arguments const int64_t numUserArgs = min(takeArgs, (argSigLen - (int64_t)skipArgs)); // If there are no user args or less than skipArgs args, return here since there's no work to do. if (numUserArgs <= 0) { return; } #ifdef TARGET_ARM regMaskTP doubleAlignMask = RBM_NONE; #endif // TARGET_ARM // Skip skipArgs arguments from the signature. for (unsigned i = 0; i < skipArgs; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } // Process each user arg. for (unsigned i = 0; i < numUserArgs; i++, varDscInfo->varNum++, varDscInfo->varDsc++, argLst = info.compCompHnd->getArgNext(argLst)) { LclVarDsc* varDsc = varDscInfo->varDsc; CORINFO_CLASS_HANDLE typeHnd = nullptr; CorInfoTypeWithMod corInfoType = info.compCompHnd->getArgType(&info.compMethodInfo->args, argLst, &typeHnd); varDsc->lvIsParam = 1; lvaInitVarDsc(varDsc, varDscInfo->varNum, strip(corInfoType), typeHnd, argLst, &info.compMethodInfo->args); if (strip(corInfoType) == CORINFO_TYPE_CLASS) { CORINFO_CLASS_HANDLE clsHnd = info.compCompHnd->getArgClass(&info.compMethodInfo->args, argLst); lvaSetClass(varDscInfo->varNum, clsHnd); } // For ARM, ARM64, and AMD64 varargs, all arguments go in integer registers var_types argType = mangleVarArgsType(varDsc->TypeGet()); var_types origArgType = argType; // ARM softfp calling convention should affect only the floating point arguments. // Otherwise there appear too many surplus pre-spills and other memory operations // with the associated locations . bool isSoftFPPreSpill = opts.compUseSoftFP && varTypeIsFloating(varDsc->TypeGet()); unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); unsigned cSlots = (argSize + TARGET_POINTER_SIZE - 1) / TARGET_POINTER_SIZE; // the total number of slots of this argument bool isHfaArg = false; var_types hfaType = TYP_UNDEF; // Methods that use VarArg or SoftFP cannot have HFA arguments except // Native varargs on arm64 unix use the regular calling convention. 
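        // (An HFA, "homogeneous floating-point aggregate", is a struct whose fields are all
        // the same floating-point or SIMD type, e.g. struct { float x, y, z; }; the ARM and
        // ARM64 ABIs pass such a struct in consecutive floating-point registers.)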
        if (((TargetOS::IsUnix && TargetArchitecture::IsArm64) || !info.compIsVarArgs) && !opts.compUseSoftFP)
        {
            // If the argType is a struct, then check if it is an HFA
            if (varTypeIsStruct(argType))
            {
                // hfaType is set to float, double, or SIMD type if it is an HFA, otherwise TYP_UNDEF
                hfaType  = GetHfaType(typeHnd);
                isHfaArg = varTypeIsValidHfaType(hfaType);
            }
        }
        else if (info.compIsVarArgs)
        {
            // Currently native varargs is not implemented on non-Windows targets.
            //
            // Note that some targets like Arm64 Unix should not need much work as
            // the ABI is the same, while other targets may only need small changes,
            // such as amd64 Unix, which just expects RAX to pass numFPArguments.
            if (TargetOS::IsUnix)
            {
                NYI("InitUserArgs for Vararg callee is not yet implemented on non Windows targets.");
            }
        }

        if (isHfaArg)
        {
            // We have an HFA argument, so from here on out treat the type as a float, double, or vector.
            // The original struct type is available by using origArgType.
            // We also update the cSlots to be the number of float/double/vector fields in the HFA.
            argType = hfaType; // TODO-Cleanup: remove this assignment and mark `argType` as const.
            varDsc->SetHfaType(hfaType);
            cSlots = varDsc->lvHfaSlots();
        }

        // The number of slots that must be enregistered if we are to consider this argument enregistered.
        // This is normally the same as cSlots, since we normally either enregister the entire object,
        // or none of it. For structs on ARM, however, we only need to enregister a single slot to consider
        // it enregistered, as long as we can split the rest onto the stack.
        unsigned cSlotsToEnregister = cSlots;

#if defined(TARGET_ARM64)

        if (compFeatureArgSplit())
        {
            // On arm64 Windows we will need to properly handle the case where a >8byte <=16byte
            // struct is split between register r7 and virtual stack slot s[0]
            // We will only do this for calls to vararg methods on Windows Arm64
            //
            // !!This does not affect the normal arm64 calling convention or Unix Arm64!!
            if (this->info.compIsVarArgs && argType == TYP_STRUCT)
            {
                if (varDscInfo->canEnreg(TYP_INT, 1) &&     // The beginning of the struct can go in a register
                    !varDscInfo->canEnreg(TYP_INT, cSlots)) // The end of the struct can't fit in a register
                {
                    cSlotsToEnregister = 1; // Force the split
                }
            }
        }

#endif // defined(TARGET_ARM64)

#ifdef TARGET_ARM
        // On ARM we pass the first 4 words of integer arguments and non-HFA structs in registers.
        // But we pre-spill user arguments in varargs methods and structs.
        //
        unsigned cAlign;
        bool     preSpill = info.compIsVarArgs || isSoftFPPreSpill;

        switch (origArgType)
        {
            case TYP_STRUCT:
                assert(varDsc->lvSize() == argSize);
                cAlign = varDsc->lvStructDoubleAlign ? 2 : 1;

                // HFA arguments go on the stack frame. They don't get spilled in the prolog like struct
                // arguments passed in the integer registers but get homed immediately after the prolog.
                if (!isHfaArg)
                {
                    // TODO-Arm32-Windows: vararg struct should be forced to split like
                    // ARM64 above.
                    cSlotsToEnregister = 1; // HFAs must be totally enregistered or not, but other structs can be split.
                    preSpill           = true;
                }
                break;

            case TYP_DOUBLE:
            case TYP_LONG:
                cAlign = 2;
                break;

            default:
                cAlign = 1;
                break;
        }

        if (isRegParamType(argType))
        {
            compArgSize += varDscInfo->alignReg(argType, cAlign) * REGSIZE_BYTES;
        }

        if (argType == TYP_STRUCT)
        {
            // Are we going to split the struct between registers and stack? We can do that as long as
            // no floating-point arguments have been put on the stack.
// // From the ARM Procedure Call Standard: // Rule C.5: "If the NCRN is less than r4 **and** the NSAA is equal to the SP," // then split the argument between registers and stack. Implication: if something // has already been spilled to the stack, then anything that would normally be // split between the core registers and the stack will be put on the stack. // Anything that follows will also be on the stack. However, if something from // floating point regs has been spilled to the stack, we can still use r0-r3 until they are full. if (varDscInfo->canEnreg(TYP_INT, 1) && // The beginning of the struct can go in a register !varDscInfo->canEnreg(TYP_INT, cSlots) && // The end of the struct can't fit in a register varDscInfo->existAnyFloatStackArgs()) // There's at least one stack-based FP arg already { varDscInfo->setAllRegArgUsed(TYP_INT); // Prevent all future use of integer registers preSpill = false; // This struct won't be prespilled, since it will go on the stack } } if (preSpill) { for (unsigned ix = 0; ix < cSlots; ix++) { if (!varDscInfo->canEnreg(TYP_INT, ix + 1)) { break; } regMaskTP regMask = genMapArgNumToRegMask(varDscInfo->regArgNum(TYP_INT) + ix, TYP_INT); if (cAlign == 2) { doubleAlignMask |= regMask; } codeGen->regSet.rsMaskPreSpillRegArg |= regMask; } } #else // !TARGET_ARM #if defined(UNIX_AMD64_ABI) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; if (varTypeIsStruct(argType)) { assert(typeHnd != nullptr); eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); if (structDesc.passedInRegisters) { unsigned intRegCount = 0; unsigned floatRegCount = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { intRegCount++; } else if (structDesc.IsSseSlot(i)) { floatRegCount++; } else { assert(false && "Invalid eightbyte classification type."); break; } } if (intRegCount != 0 && !varDscInfo->canEnreg(TYP_INT, intRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } if (floatRegCount != 0 && !varDscInfo->canEnreg(TYP_FLOAT, floatRegCount)) { structDesc.passedInRegisters = false; // No register to enregister the eightbytes. } } } #endif // UNIX_AMD64_ABI #endif // !TARGET_ARM // The final home for this incoming register might be our local stack frame. // For System V platforms the final home will always be on the local stack frame. varDsc->lvOnFrame = true; bool canPassArgInRegisters = false; #if defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { canPassArgInRegisters = structDesc.passedInRegisters; } else #elif defined(TARGET_X86) if (varTypeIsStruct(argType) && isTrivialPointerSizedStruct(typeHnd)) { canPassArgInRegisters = varDscInfo->canEnreg(TYP_I_IMPL, cSlotsToEnregister); } else #endif // defined(UNIX_AMD64_ABI) { canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister); } if (canPassArgInRegisters) { /* Another register argument */ // Allocate the registers we need. allocRegArg() returns the first argument register number of the set. // For non-HFA structs, we still "try" to enregister the whole thing; it will just max out if splitting // to the stack happens. 
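            // (On ARM, for example, a 20-byte struct arriving when only r2 and r3 are still
            // free gets its first two slots in r2/r3 and the remaining three slots on the stack.)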
unsigned firstAllocatedRegArgNum = 0; #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS #if defined(UNIX_AMD64_ABI) unsigned secondAllocatedRegArgNum = 0; var_types firstEightByteType = TYP_UNDEF; var_types secondEightByteType = TYP_UNDEF; if (varTypeIsStruct(argType)) { if (structDesc.eightByteCount >= 1) { firstEightByteType = GetEightByteType(structDesc, 0); firstAllocatedRegArgNum = varDscInfo->allocRegArg(firstEightByteType, 1); } } else #endif // defined(UNIX_AMD64_ABI) { firstAllocatedRegArgNum = varDscInfo->allocRegArg(argType, cSlots); } if (isHfaArg) { // We need to save the fact that this HFA is enregistered // Note that we can have HVAs of SIMD types even if we are not recognizing intrinsics. // In that case, we won't have normalized the vector types on the varDsc, so if we have a single vector // register, we need to set the type now. Otherwise, later we'll assume this is passed by reference. if (varDsc->lvHfaSlots() != 1) { varDsc->lvIsMultiRegArg = true; } } varDsc->lvIsRegArg = 1; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 if (argType == TYP_STRUCT) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); if (cSlots == 2) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_I_IMPL)); varDsc->lvIsMultiRegArg = true; } } #elif defined(UNIX_AMD64_ABI) if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType)); // If there is a second eightbyte, get a register for it too and map the arg to the reg number. if (structDesc.eightByteCount >= 2) { secondEightByteType = GetEightByteType(structDesc, 1); secondAllocatedRegArgNum = varDscInfo->allocRegArg(secondEightByteType, 1); varDsc->lvIsMultiRegArg = true; } if (secondEightByteType != TYP_UNDEF) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType)); } } #else // ARM32 if (varTypeIsStruct(argType)) { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, TYP_I_IMPL)); } #endif // ARM32 else #endif // FEATURE_MULTIREG_ARGS { varDsc->SetArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, argType)); } #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_LONG) { varDsc->SetOtherArgReg(genMapRegArgNumToRegNum(firstAllocatedRegArgNum + 1, TYP_INT)); } #if FEATURE_FASTTAILCALL // Check if arg was split between registers and stack. 
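            // For a split argument the stack offset recorded below is negative: it backs up
            // over the slots that live in registers, so the portion that is actually in memory
            // lands at offset zero of the incoming stack argument area.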
            if (!varTypeUsesFloatReg(argType))
            {
                unsigned firstRegArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg());
                unsigned lastRegArgNum  = firstRegArgNum + cSlots - 1;
                if (lastRegArgNum >= varDscInfo->maxIntRegArgNum)
                {
                    assert(varDscInfo->stackArgSize == 0);
                    unsigned numEnregistered = varDscInfo->maxIntRegArgNum - firstRegArgNum;
                    varDsc->SetStackOffset(-(int)numEnregistered * REGSIZE_BYTES);
                    varDscInfo->stackArgSize += (cSlots - numEnregistered) * REGSIZE_BYTES;
                    varDscInfo->hasSplitParam = true;
                    JITDUMP("set user arg V%02u offset to %d\n", varDscInfo->varNum, varDsc->GetStackOffset());
                }
            }
#endif
#endif // TARGET_ARM

#ifdef DEBUG
            if (verbose)
            {
                printf("Arg #%u passed in register(s) ", varDscInfo->varNum);

#if defined(UNIX_AMD64_ABI)
                if (varTypeIsStruct(argType))
                {
                    // Print both registers, just to be clear
                    if (firstEightByteType == TYP_UNDEF)
                    {
                        printf("firstEightByte: <not used>");
                    }
                    else
                    {
                        printf("firstEightByte: %s",
                               getRegName(genMapRegArgNumToRegNum(firstAllocatedRegArgNum, firstEightByteType)));
                    }

                    if (secondEightByteType == TYP_UNDEF)
                    {
                        printf(", secondEightByte: <not used>");
                    }
                    else
                    {
                        printf(", secondEightByte: %s",
                               getRegName(genMapRegArgNumToRegNum(secondAllocatedRegArgNum, secondEightByteType)));
                    }
                }
                else
#endif // defined(UNIX_AMD64_ABI)
                {
                    bool     isFloat   = varTypeUsesFloatReg(argType);
                    unsigned regArgNum = genMapRegNumToRegArgNum(varDsc->GetArgReg(), argType);

                    for (unsigned ix = 0; ix < cSlots; ix++, regArgNum++)
                    {
                        if (ix > 0)
                        {
                            printf(",");
                        }

                        if (!isFloat && (regArgNum >= varDscInfo->maxIntRegArgNum)) // a struct has been split between
                                                                                    // registers and stack
                        {
                            printf(" stack slots:%d", cSlots - ix);
                            break;
                        }

#ifdef TARGET_ARM
                        if (isFloat)
                        {
                            // Print register size prefix
                            if (argType == TYP_DOUBLE)
                            {
                                // Print both registers, just to be clear
                                printf("%s/%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType)),
                                       getRegName(genMapRegArgNumToRegNum(regArgNum + 1, argType)));

                                // doubles take 2 slots
                                assert(ix + 1 < cSlots);
                                ++ix;
                                ++regArgNum;
                            }
                            else
                            {
                                printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType)));
                            }
                        }
                        else
#endif // TARGET_ARM
                        {
                            printf("%s", getRegName(genMapRegArgNumToRegNum(regArgNum, argType)));
                        }
                    }
                }
                printf("\n");
            }
#endif    // DEBUG
        } // end if (canPassArgInRegisters)
        else
        {
#if defined(TARGET_ARM)
            varDscInfo->setAllRegArgUsed(argType);

            if (varTypeUsesFloatReg(argType))
            {
                varDscInfo->setAnyFloatStackArgs();
            }

#elif defined(TARGET_ARM64)

            // If we needed to use the stack in order to pass this argument then
            // record the fact that we have used up any remaining registers of this 'type'
            // This prevents any 'backfilling' from occurring on ARM64
            //
            varDscInfo->setAllRegArgUsed(argType);

#endif // TARGET_XXX

#if FEATURE_FASTTAILCALL
#ifdef TARGET_ARM
            unsigned argAlignment = cAlign * TARGET_POINTER_SIZE;
#else
            unsigned argAlignment = eeGetArgSizeAlignment(origArgType, (hfaType == TYP_FLOAT));
            // We expect the following rounding operation to be a noop on all
            // ABIs except ARM (where we have 8-byte aligned args) and macOS
            // ARM64 (that allows to pack multiple smaller parameters in a
            // single stack slot).
            assert(compMacOsArm64Abi() || ((varDscInfo->stackArgSize % argAlignment) == 0));
#endif
            varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment);
            JITDUMP("set user arg V%02u offset to %u\n", varDscInfo->varNum, varDscInfo->stackArgSize);

            varDsc->SetStackOffset(varDscInfo->stackArgSize);
            varDscInfo->stackArgSize += argSize;
#endif // FEATURE_FASTTAILCALL
        }

#ifdef UNIX_AMD64_ABI
        // The arg size is returning the number of bytes of the argument.
For a struct it could return a size not a // multiple of TARGET_POINTER_SIZE. The stack allocated space should always be multiple of TARGET_POINTER_SIZE, // so round it up. compArgSize += roundUp(argSize, TARGET_POINTER_SIZE); #else // !UNIX_AMD64_ABI compArgSize += argSize; #endif // !UNIX_AMD64_ABI if (info.compIsVarArgs || isSoftFPPreSpill) { #if defined(TARGET_X86) varDsc->SetStackOffset(compArgSize); #else // !TARGET_X86 // TODO-CQ: We shouldn't have to go as far as to declare these // address-exposed -- DoNotEnregister should suffice. lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } if (opts.IsOSR() && info.compPatchpointInfo->IsExposed(varDscInfo->varNum)) { JITDUMP("-- V%02u is OSR exposed\n", varDscInfo->varNum); varDsc->lvHasLdAddrOp = 1; lvaSetVarAddrExposed(varDscInfo->varNum DEBUGARG(AddressExposedReason::OSR_EXPOSED)); } } compArgSize = GetOutgoingArgByteSize(compArgSize); #ifdef TARGET_ARM if (doubleAlignMask != RBM_NONE) { assert(RBM_ARG_REGS == 0xF); assert((doubleAlignMask & RBM_ARG_REGS) == doubleAlignMask); if (doubleAlignMask != RBM_NONE && doubleAlignMask != RBM_ARG_REGS) { // 'double aligned types' can begin only at r0 or r2 and we always expect at least two registers to be used // Note that in rare cases, we can have double-aligned structs of 12 bytes (if specified explicitly with // attributes) assert((doubleAlignMask == 0b0011) || (doubleAlignMask == 0b1100) || (doubleAlignMask == 0b0111) /* || 0b1111 is if'ed out */); // Now if doubleAlignMask is xyz1 i.e., the struct starts in r0, and we prespill r2 or r3 // but not both, then the stack would be misaligned for r0. So spill both // r2 and r3. // // ; +0 --- caller SP double aligned ---- // ; -4 r2 r3 // ; -8 r1 r1 // ; -c r0 r0 <-- misaligned. // ; callee saved regs bool startsAtR0 = (doubleAlignMask & 1) == 1; bool r2XorR3 = ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R2) == 0) != ((codeGen->regSet.rsMaskPreSpillRegArg & RBM_R3) == 0); if (startsAtR0 && r2XorR3) { codeGen->regSet.rsMaskPreSpillAlign = (~codeGen->regSet.rsMaskPreSpillRegArg & ~doubleAlignMask) & RBM_ARG_REGS; } } } #endif // TARGET_ARM } /*****************************************************************************/ void Compiler::lvaInitGenericsCtxt(InitVarDscInfo* varDscInfo) { //@GENERICS: final instantiation-info argument for shared generic methods // and shared generic struct instance methods if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { info.compTypeCtxtArg = varDscInfo->varNum; LclVarDsc* varDsc = varDscInfo->varDsc; varDsc->lvIsParam = 1; varDsc->lvType = TYP_I_IMPL; if (varDscInfo->canEnreg(TYP_I_IMPL)) { /* Another register argument */ varDsc->lvIsRegArg = 1; varDsc->SetArgReg(genMapRegArgNumToRegNum(varDscInfo->regArgNum(TYP_INT), varDsc->TypeGet())); #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame varDscInfo->intRegArgNum++; #ifdef DEBUG if (verbose) { printf("'GenCtxt' passed in register %s\n", getRegName(varDsc->GetArgReg())); } #endif } else { // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg // returns false. 
            varDsc->lvOnFrame = true;
#if FEATURE_FASTTAILCALL
            varDsc->SetStackOffset(varDscInfo->stackArgSize);
            varDscInfo->stackArgSize += TARGET_POINTER_SIZE;
#endif // FEATURE_FASTTAILCALL
        }

        compArgSize += TARGET_POINTER_SIZE;

#if defined(TARGET_X86)
        if (info.compIsVarArgs)
            varDsc->SetStackOffset(compArgSize);
#endif // TARGET_X86

        varDscInfo->varNum++;
        varDscInfo->varDsc++;
    }
}

/*****************************************************************************/
void Compiler::lvaInitVarArgsHandle(InitVarDscInfo* varDscInfo)
{
    if (info.compIsVarArgs)
    {
        lvaVarargsHandleArg = varDscInfo->varNum;

        LclVarDsc* varDsc = varDscInfo->varDsc;
        varDsc->lvType    = TYP_I_IMPL;
        varDsc->lvIsParam = 1;
#if defined(TARGET_X86)
        // Codegen will need it for x86 scope info.
        varDsc->lvImplicitlyReferenced = 1;
#endif // TARGET_X86

        lvaSetVarDoNotEnregister(lvaVarargsHandleArg DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));

        assert(mostRecentlyActivePhase == PHASE_PRE_IMPORT);

        // TODO-Cleanup: this is preImportation phase, why do we try to work with regs here?
        // Should it be just deleted?
        if (varDscInfo->canEnreg(TYP_I_IMPL))
        {
            /* Another register argument */

            unsigned varArgHndArgNum = varDscInfo->allocRegArg(TYP_I_IMPL);

            varDsc->lvIsRegArg = 1;
            varDsc->SetArgReg(genMapRegArgNumToRegNum(varArgHndArgNum, TYP_I_IMPL));
#if FEATURE_MULTIREG_ARGS
            varDsc->SetOtherArgReg(REG_NA);
#endif
            varDsc->lvOnFrame = true; // The final home for this incoming register might be our local stack frame

#ifdef TARGET_ARM
            // This has to be spilled right in front of the real arguments and we have
            // to pre-spill all the argument registers explicitly because we only have
            // symbols for the declared ones, not any potential variadic ones.
            for (unsigned ix = varArgHndArgNum; ix < ArrLen(intArgMasks); ix++)
            {
                codeGen->regSet.rsMaskPreSpillRegArg |= intArgMasks[ix];
            }
#endif // TARGET_ARM

#ifdef DEBUG
            if (verbose)
            {
                printf("'VarArgHnd' passed in register %s\n", getRegName(varDsc->GetArgReg()));
            }
#endif // DEBUG
        }
        else
        {
            // We need to mark these as being on the stack, as this is not done elsewhere in the case that canEnreg
            // returns false.
            varDsc->lvOnFrame = true;
#if FEATURE_FASTTAILCALL
            varDsc->SetStackOffset(varDscInfo->stackArgSize);
            varDscInfo->stackArgSize += TARGET_POINTER_SIZE;
#endif // FEATURE_FASTTAILCALL
        }

        /* Update the total argument size, count and varDsc */

        compArgSize += TARGET_POINTER_SIZE;

        varDscInfo->varNum++;
        varDscInfo->varDsc++;

#if defined(TARGET_X86)
        varDsc->SetStackOffset(compArgSize);

        // Allocate a temp to point at the beginning of the args

        lvaVarargsBaseOfStkArgs                  = lvaGrabTemp(false DEBUGARG("Varargs BaseOfStkArgs"));
        lvaTable[lvaVarargsBaseOfStkArgs].lvType = TYP_I_IMPL;

#endif // TARGET_X86
    }
}

/*****************************************************************************/
void Compiler::lvaInitVarDsc(LclVarDsc*              varDsc,
                             unsigned                varNum,
                             CorInfoType             corInfoType,
                             CORINFO_CLASS_HANDLE    typeHnd,
                             CORINFO_ARG_LIST_HANDLE varList,
                             CORINFO_SIG_INFO*       varSig)
{
    noway_assert(varDsc == lvaGetDesc(varNum));

    switch (corInfoType)
    {
        // Mark types that look like a pointer for doing shadow-copying of
        // parameters if we have an unsafe buffer.
        // Note that this does not handle structs with pointer fields. Instead,
        // we rely on using the assign-groups/equivalence-groups in
        // gsFindVulnerableParams() to determine if a buffer-struct contains a
        // pointer. We could do better by having the EE determine this for us.
        // Note that we want to keep buffers without pointers at lower memory
        // addresses than buffers with pointers.
case CORINFO_TYPE_PTR: case CORINFO_TYPE_BYREF: case CORINFO_TYPE_CLASS: case CORINFO_TYPE_STRING: case CORINFO_TYPE_VAR: case CORINFO_TYPE_REFANY: varDsc->lvIsPtr = 1; break; default: break; } var_types type = JITtype2varType(corInfoType); if (varTypeIsFloating(type)) { compFloatingPointUsed = true; } if (typeHnd) { unsigned cFlags = info.compCompHnd->getClassAttribs(typeHnd); // We can get typeHnds for primitive types, these are value types which only contain // a primitive. We will need the typeHnd to distinguish them, so we store it here. if ((cFlags & CORINFO_FLG_VALUECLASS) && !varTypeIsStruct(type)) { // printf("This is a struct that the JIT will treat as a primitive\n"); varDsc->lvVerTypeInfo = verMakeTypeInfo(typeHnd); } varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags); } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) varDsc->lvIsImplicitByRef = 0; #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) // Set the lvType (before this point it is TYP_UNDEF). if (GlobalJitOptions::compFeatureHfa) { varDsc->SetHfaType(TYP_UNDEF); } if ((varTypeIsStruct(type))) { lvaSetStruct(varNum, typeHnd, typeHnd != nullptr, true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(varNum); } } else { varDsc->lvType = type; } if (type == TYP_BOOL) { varDsc->lvIsBoolean = true; } #ifdef DEBUG varDsc->SetStackOffset(BAD_STK_OFFS); #endif #if FEATURE_MULTIREG_ARGS varDsc->SetOtherArgReg(REG_NA); #endif // FEATURE_MULTIREG_ARGS } /***************************************************************************** * Returns our internal varNum for a given IL variable. * Asserts assume it is called after lvaTable[] has been set up. */ unsigned Compiler::compMapILvarNum(unsigned ILvarNum) { noway_assert(ILvarNum < info.compILlocalsCount || ILvarNum > unsigned(ICorDebugInfo::UNKNOWN_ILNUM)); unsigned varNum; if (ILvarNum == (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM) { // The varargs cookie is the last argument in lvaTable[] noway_assert(info.compIsVarArgs); varNum = lvaVarargsHandleArg; noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum == (unsigned)ICorDebugInfo::RETBUF_ILNUM) { noway_assert(info.compRetBuffArg != BAD_VAR_NUM); varNum = info.compRetBuffArg; } else if (ILvarNum == (unsigned)ICorDebugInfo::TYPECTXT_ILNUM) { noway_assert(info.compTypeCtxtArg >= 0); varNum = unsigned(info.compTypeCtxtArg); } else if (ILvarNum < info.compILargsCount) { // Parameter varNum = compMapILargNum(ILvarNum); noway_assert(lvaTable[varNum].lvIsParam); } else if (ILvarNum < info.compILlocalsCount) { // Local variable unsigned lclNum = ILvarNum - info.compILargsCount; varNum = info.compArgsCount + lclNum; noway_assert(!lvaTable[varNum].lvIsParam); } else { unreached(); } noway_assert(varNum < info.compLocalsCount); return varNum; } /***************************************************************************** * Returns the IL variable number given our internal varNum. * Special return values are VARG_ILNUM, RETBUF_ILNUM, TYPECTXT_ILNUM. * * Returns UNKNOWN_ILNUM if it can't be mapped. */ unsigned Compiler::compMap2ILvarNum(unsigned varNum) const { if (compIsForInlining()) { return impInlineInfo->InlinerCompiler->compMap2ILvarNum(varNum); } noway_assert(varNum < lvaCount); if (varNum == info.compRetBuffArg) { return (unsigned)ICorDebugInfo::RETBUF_ILNUM; } // Is this a varargs function? if (info.compIsVarArgs && varNum == lvaVarargsHandleArg) { return (unsigned)ICorDebugInfo::VARARGS_HND_ILNUM; } // We create an extra argument for the type context parameter // needed for shared generic code. 
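    // (For example, a single native body compiled for List<__Canon> serves all
    // reference-type instantiations; the hidden context argument tells it which
    // instantiation it is running as.)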
    if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) &&
        varNum == (unsigned)info.compTypeCtxtArg)
    {
        return (unsigned)ICorDebugInfo::TYPECTXT_ILNUM;
    }

#if FEATURE_FIXED_OUT_ARGS
    if (varNum == lvaOutgoingArgSpaceVar)
    {
        return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped
    }
#endif // FEATURE_FIXED_OUT_ARGS

    // Now mutate varNum to remove extra parameters from the count.
    if ((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) &&
        varNum > (unsigned)info.compTypeCtxtArg)
    {
        varNum--;
    }

    if (info.compIsVarArgs && varNum > lvaVarargsHandleArg)
    {
        varNum--;
    }

    /* Is there a hidden argument for the return buffer?
       Note that this code works because if the RetBuffArg is not present,
       compRetBuffArg will be BAD_VAR_NUM */
    if (info.compRetBuffArg != BAD_VAR_NUM && varNum > info.compRetBuffArg)
    {
        varNum--;
    }

    if (varNum >= info.compLocalsCount)
    {
        return (unsigned)ICorDebugInfo::UNKNOWN_ILNUM; // Cannot be mapped
    }

    return varNum;
}

/*****************************************************************************
 * Returns true if variable "varNum" may be address-exposed.
 */

bool Compiler::lvaVarAddrExposed(unsigned varNum) const
{
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    return varDsc->IsAddressExposed();
}

/*****************************************************************************
 * Returns true iff variable "varNum" should not be enregistered (for one of several reasons).
 */

bool Compiler::lvaVarDoNotEnregister(unsigned varNum)
{
    LclVarDsc* varDsc = lvaGetDesc(varNum);
    return varDsc->lvDoNotEnregister;
}

//------------------------------------------------------------------------
// lvSetMinOptsDoNotEnreg: a helper to initialize the `lvDoNotEnregister` flag
// for locals that were created before the compiler decided its optimization level.
//
// Assumptions:
//    compEnregLocals() value is finalized and is set to false.
//
void Compiler::lvSetMinOptsDoNotEnreg()
{
    JITDUMP("compEnregLocals() is false, setting doNotEnreg flag for all locals.");
    assert(!compEnregLocals());
    for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
    {
        lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars));
    }
}

/*****************************************************************************
 * Returns the handle to the class of the local variable varNum
 */

CORINFO_CLASS_HANDLE Compiler::lvaGetStruct(unsigned varNum)
{
    const LclVarDsc* varDsc = lvaGetDesc(varNum);
    return varDsc->GetStructHnd();
}

//--------------------------------------------------------------------------------------------
// lvaFieldOffsetCmp - a static compare function passed to jitstd::sort() by Compiler::StructPromotionHelper;
//   compares fields' offsets.
//
// Arguments:
//   field1 - pointer to the first field;
//   field2 - pointer to the second field.
//
// Return value:
//   true if the first field has a smaller offset than the second, false otherwise.
//
bool Compiler::lvaFieldOffsetCmp::operator()(const lvaStructFieldInfo& field1, const lvaStructFieldInfo& field2)
{
    return field1.fldOffset < field2.fldOffset;
}

//------------------------------------------------------------------------
// StructPromotionHelper constructor.
//
// Arguments:
//   compiler - pointer to a compiler to get access to an allocator, compHandle etc.
//
Compiler::StructPromotionHelper::StructPromotionHelper(Compiler* compiler)
    : compiler(compiler)
    , structPromotionInfo()
#ifdef DEBUG
    , retypedFieldsMap(compiler->getAllocator(CMK_DebugOnly))
#endif // DEBUG
{
}

//--------------------------------------------------------------------------------------------
// TryPromoteStructVar - promote struct var if it is possible and profitable.
//
// Arguments:
//   lclNum - struct number to try.
//
// Return value:
//   true if the struct var was promoted.
//
bool Compiler::StructPromotionHelper::TryPromoteStructVar(unsigned lclNum)
{
    if (CanPromoteStructVar(lclNum))
    {
#if 0
            // Often-useful debugging code: if you've narrowed down a struct-promotion problem to a single
            // method, this allows you to select a subset of the vars to promote (by 1-based ordinal number).
            static int structPromoVarNum = 0;
            structPromoVarNum++;
            if (atoi(getenv("structpromovarnumlo")) <= structPromoVarNum && structPromoVarNum <= atoi(getenv("structpromovarnumhi")))
#endif // 0
        if (ShouldPromoteStructVar(lclNum))
        {
            PromoteStructVar(lclNum);
            return true;
        }
    }
    return false;
}

#ifdef DEBUG
//--------------------------------------------------------------------------------------------
// CheckRetypedAsScalar - check that the fldType for this fieldHnd was retyped as requested type.
//
// Arguments:
//   fieldHnd      - the field handle;
//   requestedType - as which type the field was accessed;
//
// Notes:
//   For example it can happen when such struct A { struct B { long c } } is compiled and we access A.B.c,
//   it could look like "GT_FIELD struct B.c -> ADDR -> GT_FIELD struct A.B -> ADDR -> LCL_VAR A" , but
//   "GT_FIELD struct A.B -> ADDR -> LCL_VAR A" can be promoted to "LCL_VAR long A.B" and then
//   there is a type mismatch between "GT_FIELD struct B.c" and "LCL_VAR long A.B".
//
void Compiler::StructPromotionHelper::CheckRetypedAsScalar(CORINFO_FIELD_HANDLE fieldHnd, var_types requestedType)
{
    assert(retypedFieldsMap.Lookup(fieldHnd));
    assert(retypedFieldsMap[fieldHnd] == requestedType);
}
#endif // DEBUG

//--------------------------------------------------------------------------------------------
// CanPromoteStructType - checks if the struct type can be promoted.
//
// Arguments:
//   typeHnd - struct handle to check.
//
// Return value:
//   true if the struct type can be promoted.
//
// Notes:
//   The last analyzed type is memorized to skip the check if we ask about the same type again next time.
//   However, it was not found profitable to memorize all analyzed types in a map.
//
//   The check initializes only necessary fields in lvaStructPromotionInfo,
//   so if the promotion is rejected early then most fields will be uninitialized.
//
bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd)
{
    assert(typeHnd != nullptr);
    if (!compiler->eeIsValueClass(typeHnd))
    {
        // TODO-ObjectStackAllocation: Enable promotion of fields of stack-allocated objects.
        return false;
    }

    if (structPromotionInfo.typeHnd == typeHnd)
    {
        // Asking for the same type of struct as the last time.
        // Nothing needs to be done.
        // Fall through ...
        return structPromotionInfo.canPromote;
    }

    // Analyze this type from scratch.
    structPromotionInfo = lvaStructPromotionInfo(typeHnd);

    // sizeof(double) represents the size of the largest primitive type that we can struct promote.
    // In the future this may be changing to XMM_REGSIZE_BYTES.
    // Note: MaxOffset is used below to declare a local array, and therefore must be a compile-time constant.
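    // (For instance, on the non-SIMD path below the limit works out to
    // MAX_NumOfFieldsInPromotableStruct * sizeof(double) bytes, i.e. 32 bytes
    // when MAX_NumOfFieldsInPromotableStruct is 4.)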
    CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(FEATURE_SIMD)
#if defined(TARGET_XARCH)
    // This will allow promotion of 4 Vector<T> fields on AVX2 or Vector256<T> on AVX,
    // or 8 Vector<T>/Vector128<T> fields on SSE2.
    const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * YMM_REGSIZE_BYTES;
#elif defined(TARGET_ARM64)
    const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * FP_REGSIZE_BYTES;
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
#else  // !FEATURE_SIMD
    const int MaxOffset = MAX_NumOfFieldsInPromotableStruct * sizeof(double);
#endif // !FEATURE_SIMD

    assert((BYTE)MaxOffset == MaxOffset); // because lvaStructFieldInfo.fldOffset is byte-sized
    assert((BYTE)MAX_NumOfFieldsInPromotableStruct ==
           MAX_NumOfFieldsInPromotableStruct); // because lvaStructFieldInfo.fieldCnt is byte-sized

    bool containsGCpointers = false;

    COMP_HANDLE compHandle = compiler->info.compCompHnd;

    unsigned structSize = compHandle->getClassSize(typeHnd);
    if (structSize > MaxOffset)
    {
        return false; // struct is too large
    }

    unsigned fieldCnt = compHandle->getClassNumInstanceFields(typeHnd);
    if (fieldCnt == 0 || fieldCnt > MAX_NumOfFieldsInPromotableStruct)
    {
        return false; // struct must have between 1 and MAX_NumOfFieldsInPromotableStruct fields
    }

    structPromotionInfo.fieldCnt = (unsigned char)fieldCnt;
    DWORD typeFlags              = compHandle->getClassAttribs(typeHnd);

    bool overlappingFields = StructHasOverlappingFields(typeFlags);
    if (overlappingFields)
    {
        return false;
    }

    // Don't struct promote if we have a CUSTOMLAYOUT flag on an HFA type
    if (StructHasCustomLayout(typeFlags) && compiler->IsHfa(typeHnd))
    {
        return false;
    }

#ifdef TARGET_ARM
    // On ARM, we have a requirement on the struct alignment; see below.
    unsigned structAlignment = roundUp(compHandle->getClassAlignmentRequirement(typeHnd), TARGET_POINTER_SIZE);
#endif // TARGET_ARM

    // If we have "Custom Layout" then we might have an explicit Size attribute.
    // Managed C++ uses this for its structs; such C++ types will not contain GC pointers.
    //
    // The current VM implementation also incorrectly sets the CORINFO_FLG_CUSTOMLAYOUT
    // whenever a managed value class contains any GC pointers.
    // (See the comment for VMFLAG_NOT_TIGHTLY_PACKED in class.h)
    //
    // It is important to struct promote managed value classes that have GC pointers,
    // so we compute the correct value for "CustomLayout" here.
    //
    if (StructHasCustomLayout(typeFlags) && ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0))
    {
        structPromotionInfo.customLayout = true;
    }

    if (StructHasDontDigFieldsFlagSet(typeFlags))
    {
        return CanConstructAndPromoteField(&structPromotionInfo);
    }

    unsigned fieldsSize = 0;

    for (BYTE ordinal = 0; ordinal < fieldCnt; ++ordinal)
    {
        CORINFO_FIELD_HANDLE fieldHnd       = compHandle->getFieldInClass(typeHnd, ordinal);
        structPromotionInfo.fields[ordinal] = GetFieldInfo(fieldHnd, ordinal);
        const lvaStructFieldInfo& fieldInfo = structPromotionInfo.fields[ordinal];

        noway_assert(fieldInfo.fldOffset < structSize);

        if (fieldInfo.fldSize == 0)
        {
            // Not a scalar type.
            return false;
        }

        if ((fieldInfo.fldOffset % fieldInfo.fldSize) != 0)
        {
            // The code in Compiler::genPushArgList that reconstitutes
            // struct values on the stack from promoted fields expects
            // those fields to be at their natural alignment.
            return false;
        }

        if (varTypeIsGC(fieldInfo.fldType))
        {
            containsGCpointers = true;
        }

        // The end offset for this field should never be larger than our structSize.
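        // For example, a 4-byte field at offset 12 in a 16-byte struct ends exactly at structSize,
        // while a 4-byte field at offset 14 would overrun it and indicates bad field information.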
        noway_assert(fieldInfo.fldOffset + fieldInfo.fldSize <= structSize);

        fieldsSize += fieldInfo.fldSize;

#ifdef TARGET_ARM
        // On ARM, for struct types that don't use explicit layout, the alignment of the struct is
        // at least the max alignment of its fields.  We take advantage of this invariant in struct promotion,
        // so verify it here.
        if (fieldInfo.fldSize > structAlignment)
        {
            // Don't promote vars whose struct type violates the invariant.  (Alignment == size for primitives.)
            return false;
        }
#endif // TARGET_ARM
    }

    // If we saw any GC pointer or by-ref fields above then CORINFO_FLG_CONTAINS_GC_PTR or
    // CORINFO_FLG_BYREF_LIKE has to be set!
    noway_assert((containsGCpointers == false) ||
                 ((typeFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) != 0));

    // Check if this promoted struct contains any holes.
    assert(!overlappingFields);
    if (fieldsSize != structSize)
    {
        // If the sizes do not match, it means we have either overlapping fields or holes.
        // Overlapping fields were rejected early, so here it can mean only holes.
        structPromotionInfo.containsHoles = true;
    }

    // Cool, this struct is promotable.

    structPromotionInfo.canPromote = true;
    return true;
}

//--------------------------------------------------------------------------------------------
// CanConstructAndPromoteField - checks if we can construct field types without asking about them directly.
//
// Arguments:
//   structPromotionInfo - struct promotion candidate information.
//
// Return value:
//   true if we can figure out the fields from available knowledge.
//
// Notes:
//   This is needed for AOT R2R compilation when we can't cross compilation bubble borders,
//   so we should not ask about fields that are not directly referenced. If we do, the VM will have
//   to emit a type check for this field type but it does not have enough information about it.
//   As a workaround for a performance-critical corner case (a struct with a single gcref), we try to
//   construct the field information from indirect observations.
//
bool Compiler::StructPromotionHelper::CanConstructAndPromoteField(lvaStructPromotionInfo* structPromotionInfo)
{
    const CORINFO_CLASS_HANDLE typeHnd    = structPromotionInfo->typeHnd;
    const COMP_HANDLE          compHandle = compiler->info.compCompHnd;
    const DWORD                typeFlags  = compHandle->getClassAttribs(typeHnd);
    if (structPromotionInfo->fieldCnt != 1)
    {
        // Can't find out values for several fields.
        return false;
    }
    if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) == 0)
    {
        // Can't find out type of a non-gc field.
        return false;
    }

    const unsigned structSize = compHandle->getClassSize(typeHnd);
    if (structSize != TARGET_POINTER_SIZE)
    {
        return false;
    }

    assert(!structPromotionInfo->containsHoles);
    assert(!structPromotionInfo->customLayout);
    lvaStructFieldInfo& fldInfo = structPromotionInfo->fields[0];

    fldInfo.fldHnd = compHandle->getFieldInClass(typeHnd, 0);

    // We should not read it anymore.
    fldInfo.fldTypeHnd = 0;

    fldInfo.fldOffset  = 0;
    fldInfo.fldOrdinal = 0;
    fldInfo.fldSize    = TARGET_POINTER_SIZE;
    fldInfo.fldType    = TYP_BYREF;

    structPromotionInfo->canPromote = true;
    return true;
}

//--------------------------------------------------------------------------------------------
// CanPromoteStructVar - checks if the struct can be promoted.
//
// Arguments:
//   lclNum - struct number to check.
//
// Return value:
//   true if the struct var can be promoted.
// bool Compiler::StructPromotionHelper::CanPromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varTypeIsStruct(varDsc)); assert(!varDsc->lvPromoted); // Don't ask again :) // If this lclVar is used in a SIMD intrinsic, then we don't want to struct promote it. // Note, however, that SIMD lclVars that are NOT used in a SIMD intrinsic may be // profitably promoted. if (varDsc->lvIsUsedInSIMDIntrinsic()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsUsedInSIMDIntrinsic()\n", lclNum); return false; } // Reject struct promotion of parameters when -GS stack reordering is enabled // as we could introduce shadow copies of them. if (varDsc->lvIsParam && compiler->compGSReorderStackLayout) { JITDUMP(" struct promotion of V%02u is disabled because lvIsParam and compGSReorderStackLayout\n", lclNum); return false; } if (!compiler->lvaEnregMultiRegVars && varDsc->lvIsMultiRegArgOrRet()) { JITDUMP(" struct promotion of V%02u is disabled because lvIsMultiRegArgOrRet()\n", lclNum); return false; } CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != NO_CLASS_HANDLE); bool canPromote = CanPromoteStructType(typeHnd); if (canPromote && varDsc->lvIsMultiRegArgOrRet()) { unsigned fieldCnt = structPromotionInfo.fieldCnt; if (fieldCnt > MAX_MULTIREG_COUNT) { canPromote = false; } #if defined(TARGET_ARMARCH) else { for (unsigned i = 0; canPromote && (i < fieldCnt); i++) { var_types fieldType = structPromotionInfo.fields[i].fldType; // Non-HFA structs are always passed in general purpose registers. // If there are any floating point fields, don't promote for now. // Likewise, since HVA structs are passed in SIMD registers // promotion of non FP or SIMD type fields is disallowed. // TODO-1stClassStructs: add support in Lowering and prolog generation // to enable promoting these types. if (varDsc->lvIsParam && (varDsc->lvIsHfa() != varTypeUsesFloatReg(fieldType))) { canPromote = false; } #if defined(FEATURE_SIMD) // If we have a register-passed struct with mixed non-opaque SIMD types (i.e. with defined fields) // and non-SIMD types, we don't currently handle that case in the prolog, so we can't promote. else if ((fieldCnt > 1) && varTypeIsStruct(fieldType) && !compiler->isOpaqueSIMDType(structPromotionInfo.fields[i].fldTypeHnd)) { canPromote = false; } #endif // FEATURE_SIMD } } #elif defined(UNIX_AMD64_ABI) else { SortStructFields(); // Only promote if the field types match the registers, unless we have a single SIMD field. SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; compiler->eeGetSystemVAmd64PassStructInRegisterDescriptor(typeHnd, &structDesc); unsigned regCount = structDesc.eightByteCount; if ((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType)) { // Allow the case of promoting a single SIMD field, even if there are multiple registers. // We will fix this up in the prolog. } else if (structPromotionInfo.fieldCnt != regCount) { canPromote = false; } else { for (unsigned i = 0; canPromote && (i < regCount); i++) { lvaStructFieldInfo* fieldInfo = &(structPromotionInfo.fields[i]); var_types fieldType = fieldInfo->fldType; // We don't currently support passing SIMD types in registers. 
if (varTypeIsSIMD(fieldType)) { canPromote = false; } else if (varTypeUsesFloatReg(fieldType) != (structDesc.eightByteClassifications[i] == SystemVClassificationTypeSSE)) { canPromote = false; } } } } #endif // UNIX_AMD64_ABI } return canPromote; } //-------------------------------------------------------------------------------------------- // ShouldPromoteStructVar - Should a struct var be promoted if it can be promoted? // This routine mainly performs profitability checks. Right now it also has // some correctness checks due to limitations of down-stream phases. // // Arguments: // lclNum - struct local number; // // Return value: // true if the struct should be promoted. // bool Compiler::StructPromotionHelper::ShouldPromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); assert(varTypeIsStruct(varDsc)); assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd); assert(structPromotionInfo.canPromote); bool shouldPromote = true; // We *can* promote; *should* we promote? // We should only do so if promotion has potential savings. One source of savings // is if a field of the struct is accessed, since this access will be turned into // an access of the corresponding promoted field variable. Even if there are no // field accesses, but only block-level operations on the whole struct, if the struct // has only one or two fields, then doing those block operations field-wise is probably faster // than doing a whole-variable block operation (e.g., a hardware "copy loop" on x86). // Struct promotion also provides the following benefits: reduce stack frame size, // reduce the need for zero init of stack frame and fine grained constant/copy prop. // Asm diffs indicate that promoting structs up to 3 fields is a net size win. // So if no fields are accessed independently, and there are four or more fields, // then do not promote. // // TODO: Ideally we would want to consider the impact of whether the struct is // passed as a parameter or assigned the return value of a call. Because once promoted, // struct copying is done by field by field assignment instead of a more efficient // rep.stos or xmm reg based copy. if (structPromotionInfo.fieldCnt > 3 && !varDsc->lvFieldAccessed) { JITDUMP("Not promoting promotable struct local V%02u: #fields = %d, fieldAccessed = %d.\n", lclNum, structPromotionInfo.fieldCnt, varDsc->lvFieldAccessed); shouldPromote = false; } else if (varDsc->lvIsMultiRegRet && structPromotionInfo.containsHoles && structPromotionInfo.customLayout) { JITDUMP("Not promoting multi-reg returned struct local V%02u with holes.\n", lclNum); shouldPromote = false; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // TODO-PERF - Only do this when the LclVar is used in an argument context // TODO-ARM64 - HFA support should also eliminate the need for this. // TODO-ARM32 - HFA support should also eliminate the need for this. 
    // TODO-LSRA  - Currently doesn't support the passing of floating point LCL_VARS in the integer registers
    //
    // For now we don't promote structs with a single float field.
    // Promoting it can cause us to shuffle it back and forth between the int and
    // the float regs when it is used as an argument, which is very expensive for XARCH.
    //
    else if ((structPromotionInfo.fieldCnt == 1) && varTypeIsFloating(structPromotionInfo.fields[0].fldType))
    {
        JITDUMP("Not promoting promotable struct local V%02u: #fields = %d because it is a struct with "
                "single float field.\n",
                lclNum, structPromotionInfo.fieldCnt);
        shouldPromote = false;
    }
#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM
    else if (varDsc->lvIsParam && !compiler->lvaIsImplicitByRefLocal(lclNum) && !varDsc->lvIsHfa())
    {
#if FEATURE_MULTIREG_STRUCT_PROMOTE
        // Is this a variable holding a value with exactly two fields passed in
        // multiple registers?
        if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
        {
            if (structPromotionInfo.containsHoles && structPromotionInfo.customLayout)
            {
                JITDUMP("Not promoting multi-reg struct local V%02u with holes.\n", lclNum);
                shouldPromote = false;
            }
            else if ((structPromotionInfo.fieldCnt != 2) &&
                     !((structPromotionInfo.fieldCnt == 1) && varTypeIsSIMD(structPromotionInfo.fields[0].fldType)))
            {
                JITDUMP("Not promoting multireg struct local V%02u, because lvIsParam is true, #fields != 2 and it's "
                        "not a single SIMD.\n",
                        lclNum);
                shouldPromote = false;
            }
        }
        else
#endif // FEATURE_MULTIREG_STRUCT_PROMOTE

            // TODO-PERF - Implement struct promotion for incoming single-register structs.
            //             Also the implementation of jmp uses the 4 byte move to store
            //             byte parameters to the stack, so that if we have a byte field
            //             with something else occupying the same 4-byte slot, it will
            //             overwrite other fields.
            if (structPromotionInfo.fieldCnt != 1)
        {
            JITDUMP("Not promoting promotable struct local V%02u, because lvIsParam is true and #fields = "
                    "%d.\n",
                    lclNum, structPromotionInfo.fieldCnt);
            shouldPromote = false;
        }
    }
    else if ((lclNum == compiler->genReturnLocal) && (structPromotionInfo.fieldCnt > 1))
    {
        // TODO-1stClassStructs: a temporary solution to keep diffs small, it will be fixed later.
        shouldPromote = false;
    }
#if defined(DEBUG)
    else if (compiler->compPromoteFewerStructs(lclNum))
    {
        // Do not promote some structs, that can be promoted, to stress promoted/unpromoted moves.
        JITDUMP("Not promoting promotable struct local V%02u, because of STRESS_PROMOTE_FEWER_STRUCTS\n", lclNum);
        shouldPromote = false;
    }
#endif

    //
    // If the lvRefCnt is zero and we have a struct promoted parameter we can end up with an extra store of
    // the incoming register into the stack frame slot.
    // In that case, we would like to avoid promotion.
    // However we haven't yet computed the lvRefCnt values so we can't do that.
    //
    CLANG_FORMAT_COMMENT_ANCHOR;

    return shouldPromote;
}

//--------------------------------------------------------------------------------------------
// SortStructFields - sort the fields according to the increasing order of the field offset.
//
// Notes:
//   This is needed because the fields need to be pushed on stack (when referenced as a struct) in offset order.
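//   For example, fields discovered as { c @ 8, a @ 0, b @ 4 } are reordered to { a @ 0, b @ 4, c @ 8 }.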
// void Compiler::StructPromotionHelper::SortStructFields() { if (!structPromotionInfo.fieldsSorted) { jitstd::sort(structPromotionInfo.fields, structPromotionInfo.fields + structPromotionInfo.fieldCnt, lvaFieldOffsetCmp()); structPromotionInfo.fieldsSorted = true; } } //-------------------------------------------------------------------------------------------- // GetFieldInfo - get struct field information. // Arguments: // fieldHnd - field handle to get info for; // ordinal - field ordinal. // // Return value: // field information. // Compiler::lvaStructFieldInfo Compiler::StructPromotionHelper::GetFieldInfo(CORINFO_FIELD_HANDLE fieldHnd, BYTE ordinal) { lvaStructFieldInfo fieldInfo; fieldInfo.fldHnd = fieldHnd; unsigned fldOffset = compiler->info.compCompHnd->getFieldOffset(fieldInfo.fldHnd); fieldInfo.fldOffset = (BYTE)fldOffset; fieldInfo.fldOrdinal = ordinal; CorInfoType corType = compiler->info.compCompHnd->getFieldType(fieldInfo.fldHnd, &fieldInfo.fldTypeHnd); fieldInfo.fldType = JITtype2varType(corType); fieldInfo.fldSize = genTypeSize(fieldInfo.fldType); #ifdef FEATURE_SIMD // Check to see if this is a SIMD type. // We will only check this if we have already found a SIMD type, which will be true if // we have encountered any SIMD intrinsics. if (compiler->usesSIMDTypes() && (fieldInfo.fldSize == 0) && compiler->isSIMDorHWSIMDClass(fieldInfo.fldTypeHnd)) { unsigned simdSize; CorInfoType simdBaseJitType = compiler->getBaseJitTypeAndSizeOfSIMDType(fieldInfo.fldTypeHnd, &simdSize); // We will only promote fields of SIMD types that fit into a SIMD register. if (simdBaseJitType != CORINFO_TYPE_UNDEF) { if ((simdSize >= compiler->minSIMDStructBytes()) && (simdSize <= compiler->maxSIMDStructBytes())) { fieldInfo.fldType = compiler->getSIMDTypeForSize(simdSize); fieldInfo.fldSize = simdSize; #ifdef DEBUG retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite); #endif // DEBUG } } } #endif // FEATURE_SIMD if (fieldInfo.fldSize == 0) { TryPromoteStructField(fieldInfo); } return fieldInfo; } //-------------------------------------------------------------------------------------------- // TryPromoteStructField - checks that this struct's field is a struct that can be promoted as scalar type // aligned at its natural boundary. Promotes the field as a scalar if the check succeeded. // // Arguments: // fieldInfo - information about the field in the outer struct. // // Return value: // true if the internal struct was promoted. // bool Compiler::StructPromotionHelper::TryPromoteStructField(lvaStructFieldInfo& fieldInfo) { // Size of TYP_BLK, TYP_FUNC, TYP_VOID and TYP_STRUCT is zero. // Early out if field type is other than TYP_STRUCT. // This is a defensive check as we don't expect a struct to have // fields of TYP_BLK, TYP_FUNC or TYP_VOID. if (fieldInfo.fldType != TYP_STRUCT) { return false; } COMP_HANDLE compHandle = compiler->info.compCompHnd; // Do not promote if the struct field in turn has more than one field. if (compHandle->getClassNumInstanceFields(fieldInfo.fldTypeHnd) != 1) { return false; } // Do not promote if the single field is not aligned at its natural boundary within // the struct field. 
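    // (That is, the inner field must start at offset 0 of its enclosing struct; any other offset
    // would imply leading padding.)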
CORINFO_FIELD_HANDLE innerFieldHndl = compHandle->getFieldInClass(fieldInfo.fldTypeHnd, 0); unsigned innerFieldOffset = compHandle->getFieldOffset(innerFieldHndl); if (innerFieldOffset != 0) { return false; } CorInfoType fieldCorType = compHandle->getFieldType(innerFieldHndl); var_types fieldVarType = JITtype2varType(fieldCorType); unsigned fieldSize = genTypeSize(fieldVarType); // Do not promote if the field is not a primitive type, is floating-point, // or is not properly aligned. // // TODO-PERF: Structs containing a single floating-point field on Amd64 // need to be passed in integer registers. Right now LSRA doesn't support // passing of floating-point LCL_VARS in integer registers. Enabling promotion // of such structs results in an assert in lsra right now. // // TODO-CQ: Right now we only promote an actual SIMD typed field, which would cause // a nested SIMD type to fail promotion. if (fieldSize == 0 || fieldSize > TARGET_POINTER_SIZE || varTypeIsFloating(fieldVarType)) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but that field has invalid size or type.\n"); return false; } if (fieldSize != TARGET_POINTER_SIZE) { unsigned outerFieldOffset = compHandle->getFieldOffset(fieldInfo.fldHnd); if ((outerFieldOffset % fieldSize) != 0) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but the outer struct offset %u is not a multiple of the inner field size %u.\n", outerFieldOffset, fieldSize); return false; } } // Insist this wrapped field occupy all of its parent storage. unsigned innerStructSize = compHandle->getClassSize(fieldInfo.fldTypeHnd); if (fieldSize != innerStructSize) { JITDUMP("Promotion blocked: struct contains struct field with one field," " but that field is not the same size as its parent.\n"); return false; } // Retype the field as the type of the single field of the struct. // This is a hack that allows us to promote such fields before we support recursive struct promotion // (tracked by #10019). fieldInfo.fldType = fieldVarType; fieldInfo.fldSize = fieldSize; #ifdef DEBUG retypedFieldsMap.Set(fieldInfo.fldHnd, fieldInfo.fldType, RetypedAsScalarFieldsMap::Overwrite); #endif // DEBUG return true; } //-------------------------------------------------------------------------------------------- // PromoteStructVar - promote struct variable. // // Arguments: // lclNum - struct local number; // void Compiler::StructPromotionHelper::PromoteStructVar(unsigned lclNum) { LclVarDsc* varDsc = compiler->lvaGetDesc(lclNum); // We should never see a reg-sized non-field-addressed struct here. assert(!varDsc->lvRegStruct); assert(varDsc->GetStructHnd() == structPromotionInfo.typeHnd); assert(structPromotionInfo.canPromote); varDsc->lvFieldCnt = structPromotionInfo.fieldCnt; varDsc->lvFieldLclStart = compiler->lvaCount; varDsc->lvPromoted = true; varDsc->lvContainsHoles = structPromotionInfo.containsHoles; varDsc->lvCustomLayout = structPromotionInfo.customLayout; #ifdef DEBUG // Don't change the source to a TYP_BLK either. 
varDsc->lvKeepType = 1; #endif #ifdef DEBUG if (compiler->verbose) { printf("\nPromoting struct local V%02u (%s):", lclNum, compiler->eeGetClassName(varDsc->GetStructHnd())); } #endif SortStructFields(); for (unsigned index = 0; index < structPromotionInfo.fieldCnt; ++index) { const lvaStructFieldInfo* pFieldInfo = &structPromotionInfo.fields[index]; if (varTypeUsesFloatReg(pFieldInfo->fldType)) { // Whenever we promote a struct that contains a floating point field // it's possible we transition from a method that originally only had integer // local vars to start having FP. We have to communicate this through this flag // since LSRA later on will use this flag to determine whether or not to track FP register sets. compiler->compFloatingPointUsed = true; } // Now grab the temp for the field local. #ifdef DEBUG char buf[200]; sprintf_s(buf, sizeof(buf), "%s V%02u.%s (fldOffset=0x%x)", "field", lclNum, compiler->eeGetFieldName(pFieldInfo->fldHnd), pFieldInfo->fldOffset); // We need to copy 'buf' as lvaGrabTemp() below caches a copy to its argument. size_t len = strlen(buf) + 1; char* bufp = compiler->getAllocator(CMK_DebugOnly).allocate<char>(len); strcpy_s(bufp, len, buf); if (index > 0) { noway_assert(pFieldInfo->fldOffset > (pFieldInfo - 1)->fldOffset); } #endif // Lifetime of field locals might span multiple BBs, so they must be long lifetime temps. const unsigned varNum = compiler->lvaGrabTemp(false DEBUGARG(bufp)); // lvaGrabTemp can reallocate the lvaTable, so // refresh the cached varDsc for lclNum. varDsc = compiler->lvaGetDesc(lclNum); LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(varNum); fieldVarDsc->lvType = pFieldInfo->fldType; fieldVarDsc->lvExactSize = pFieldInfo->fldSize; fieldVarDsc->lvIsStructField = true; fieldVarDsc->lvFieldHnd = pFieldInfo->fldHnd; fieldVarDsc->lvFldOffset = pFieldInfo->fldOffset; fieldVarDsc->lvFldOrdinal = pFieldInfo->fldOrdinal; fieldVarDsc->lvParentLcl = lclNum; fieldVarDsc->lvIsParam = varDsc->lvIsParam; // This new local may be the first time we've seen a long typed local. if (fieldVarDsc->lvType == TYP_LONG) { compiler->compLongUsed = true; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Reset the implicitByRef flag. fieldVarDsc->lvIsImplicitByRef = 0; #endif // Do we have a parameter that can be enregistered? // if (varDsc->lvIsRegArg) { fieldVarDsc->lvIsRegArg = true; regNumber parentArgReg = varDsc->GetArgReg(); #if FEATURE_MULTIREG_ARGS if (!compiler->lvaIsImplicitByRefLocal(lclNum)) { #ifdef UNIX_AMD64_ABI if (varTypeIsSIMD(fieldVarDsc) && (varDsc->lvFieldCnt == 1)) { // This SIMD typed field may be passed in multiple registers. fieldVarDsc->SetArgReg(parentArgReg); fieldVarDsc->SetOtherArgReg(varDsc->GetOtherArgReg()); } else #endif // UNIX_AMD64_ABI { regNumber fieldRegNum; if (index == 0) { fieldRegNum = parentArgReg; } else if (varDsc->lvIsHfa()) { unsigned regIncrement = fieldVarDsc->lvFldOrdinal; #ifdef TARGET_ARM // TODO: Need to determine if/how to handle split args. if (varDsc->GetHfaType() == TYP_DOUBLE) { regIncrement *= 2; } #endif // TARGET_ARM fieldRegNum = (regNumber)(parentArgReg + regIncrement); } else { assert(index == 1); fieldRegNum = varDsc->GetOtherArgReg(); } fieldVarDsc->SetArgReg(fieldRegNum); } } else #endif // FEATURE_MULTIREG_ARGS && defined(FEATURE_SIMD) { fieldVarDsc->SetArgReg(parentArgReg); } } #ifdef FEATURE_SIMD if (varTypeIsSIMD(pFieldInfo->fldType)) { // Set size to zero so that lvaSetStruct will appropriately set the SIMD-relevant fields. 
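            // (lvaSetStruct asserts that lvExactSize is zero before re-deriving it from the layout,
            // so zeroing it here forces a clean re-derivation for the SIMD field.)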
            fieldVarDsc->lvExactSize = 0;
            compiler->lvaSetStruct(varNum, pFieldInfo->fldTypeHnd, false, true);

            // We will not recursively promote this, so mark it as 'lvRegStruct' (note that we wouldn't
            // be promoting this if we didn't think it could be enregistered).
            fieldVarDsc->lvRegStruct = true;
        }
#endif // FEATURE_SIMD

#ifdef DEBUG
        // This temporary should not be converted to a double in stress mode,
        // because we introduce assigns to it after the stress conversion
        fieldVarDsc->lvKeepType = 1;
#endif
    }
}

//--------------------------------------------------------------------------------------------
// lvaGetFieldLocal - returns the local var index for a promoted field in a promoted struct var.
//
// Arguments:
//   varDsc    - the promoted struct var descriptor;
//   fldOffset - field offset in the struct.
//
// Return value:
//   the index of the local that represents this field.
//
unsigned Compiler::lvaGetFieldLocal(const LclVarDsc* varDsc, unsigned int fldOffset)
{
    noway_assert(varTypeIsStruct(varDsc));
    noway_assert(varDsc->lvPromoted);

    for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
    {
        noway_assert(lvaTable[i].lvIsStructField);
        noway_assert(lvaTable[i].lvParentLcl == (unsigned)(varDsc - lvaTable));
        if (lvaTable[i].lvFldOffset == fldOffset)
        {
            return i;
        }
    }

    // This is the not-found error return path, the caller should check for BAD_VAR_NUM
    return BAD_VAR_NUM;
}

/*****************************************************************************
 *
 *  Set the local var "varNum" as address-exposed.
 *  If this is a promoted struct, label its fields the same way.
 */

void Compiler::lvaSetVarAddrExposed(unsigned varNum DEBUGARG(AddressExposedReason reason))
{
    LclVarDsc* varDsc = lvaGetDesc(varNum);

    varDsc->SetAddressExposed(true DEBUGARG(reason));

    if (varDsc->lvPromoted)
    {
        noway_assert(varTypeIsStruct(varDsc));

        for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
        {
            noway_assert(lvaTable[i].lvIsStructField);
            lvaTable[i].SetAddressExposed(true DEBUGARG(AddressExposedReason::PARENT_EXPOSED));
            lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::AddrExposed));
        }
    }

    lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::AddrExposed));
}

//------------------------------------------------------------------------
// lvaSetVarLiveInOutOfHandler: Set the local varNum as being live in and/or out of a handler
//
// Arguments:
//    varNum - the varNum of the local
//
void Compiler::lvaSetVarLiveInOutOfHandler(unsigned varNum)
{
    LclVarDsc* varDsc = lvaGetDesc(varNum);

    varDsc->lvLiveInOutOfHndlr = 1;

    if (varDsc->lvPromoted)
    {
        noway_assert(varTypeIsStruct(varDsc));

        for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
        {
            noway_assert(lvaTable[i].lvIsStructField);
            lvaTable[i].lvLiveInOutOfHndlr = 1;
            // For now, only enregister an EH Var if it is single-def and its refCnt > 1.
            if (!lvaEnregEHVars || !lvaTable[i].lvSingleDefRegCandidate || lvaTable[i].lvRefCnt() <= 1)
            {
                lvaSetVarDoNotEnregister(i DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler));
            }
        }
    }

    // For now, only enregister an EH Var if it is single-def and its refCnt > 1.
if (!lvaEnregEHVars || !varDsc->lvSingleDefRegCandidate || varDsc->lvRefCnt() <= 1) { lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } #ifdef JIT32_GCENCODER else if (lvaKeepAliveAndReportThis() && (varNum == info.compThisArg)) { // For the JIT32_GCENCODER, when lvaKeepAliveAndReportThis is true, we must either keep the "this" pointer // in the same register for the entire method, or keep it on the stack. If it is EH-exposed, we can't ever // keep it in a register, since it must also be live on the stack. Therefore, we won't attempt to allocate it. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } #endif // JIT32_GCENCODER } /***************************************************************************** * * Record that the local var "varNum" should not be enregistered (for one of several reasons.) */ void Compiler::lvaSetVarDoNotEnregister(unsigned varNum DEBUGARG(DoNotEnregisterReason reason)) { LclVarDsc* varDsc = lvaGetDesc(varNum); const bool wasAlreadyMarkedDoNotEnreg = (varDsc->lvDoNotEnregister == 1); varDsc->lvDoNotEnregister = 1; #ifdef DEBUG if (!wasAlreadyMarkedDoNotEnreg) { varDsc->SetDoNotEnregReason(reason); } if (verbose) { printf("\nLocal V%02u should not be enregistered because: ", varNum); } switch (reason) { case DoNotEnregisterReason::AddrExposed: JITDUMP("it is address exposed\n"); assert(varDsc->IsAddressExposed()); break; case DoNotEnregisterReason::DontEnregStructs: JITDUMP("struct enregistration is disabled\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::NotRegSizeStruct: JITDUMP("struct size does not match reg size\n"); assert(varTypeIsStruct(varDsc)); break; case DoNotEnregisterReason::LocalField: JITDUMP("was accessed as a local field\n"); break; case DoNotEnregisterReason::VMNeedsStackAddr: JITDUMP("VM needs stack addr\n"); break; case DoNotEnregisterReason::LiveInOutOfHandler: JITDUMP("live in/out of a handler\n"); varDsc->lvLiveInOutOfHndlr = 1; break; case DoNotEnregisterReason::BlockOp: JITDUMP("written/read in a block op\n"); break; case DoNotEnregisterReason::IsStructArg: if (varTypeIsStruct(varDsc)) { JITDUMP("it is a struct arg\n"); } else { JITDUMP("it is reinterpreted as a struct arg\n"); } break; case DoNotEnregisterReason::DepField: JITDUMP("field of a dependently promoted struct\n"); assert(varDsc->lvIsStructField && (lvaGetParentPromotionType(varNum) != PROMOTION_TYPE_INDEPENDENT)); break; case DoNotEnregisterReason::NoRegVars: JITDUMP("opts.compFlags & CLFLG_REGVAR is not set\n"); assert(!compEnregLocals()); break; case DoNotEnregisterReason::MinOptsGC: JITDUMP("it is a GC Ref and we are compiling MinOpts\n"); assert(!JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())); break; #if !defined(TARGET_64BIT) case DoNotEnregisterReason::LongParamField: JITDUMP("it is a decomposed field of a long parameter\n"); break; #endif #ifdef JIT32_GCENCODER case DoNotEnregisterReason::PinningRef: JITDUMP("pinning ref\n"); assert(varDsc->lvPinned); break; #endif case DoNotEnregisterReason::LclAddrNode: JITDUMP("LclAddrVar/Fld takes the address of this node\n"); break; case DoNotEnregisterReason::CastTakesAddr: JITDUMP("cast takes addr\n"); break; case DoNotEnregisterReason::StoreBlkSrc: JITDUMP("the local is used as store block src\n"); break; case DoNotEnregisterReason::OneAsgRetyping: JITDUMP("OneAsg forbids enreg\n"); break; case DoNotEnregisterReason::SwizzleArg: JITDUMP("SwizzleArg\n"); break; case DoNotEnregisterReason::BlockOpRet: 
JITDUMP("return uses a block op\n"); break; case DoNotEnregisterReason::ReturnSpCheck: JITDUMP("Used for SP check\n"); break; case DoNotEnregisterReason::SimdUserForcesDep: JITDUMP("Promoted struct used by a SIMD/HWI node\n"); break; default: unreached(); break; } #endif } // Returns true if this local var is a multireg struct. // TODO-Throughput: This does a lookup on the class handle, and in the outgoing arg context // this information is already available on the fgArgTabEntry, and shouldn't need to be // recomputed. // bool Compiler::lvaIsMultiregStruct(LclVarDsc* varDsc, bool isVarArg) { if (varTypeIsStruct(varDsc->TypeGet())) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); structPassingKind howToPassStruct; var_types type = getArgTypeForStruct(clsHnd, &howToPassStruct, isVarArg, varDsc->lvExactSize); if (howToPassStruct == SPK_ByValueAsHfa) { assert(type == TYP_STRUCT); return true; } #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM64) if (howToPassStruct == SPK_ByValue) { assert(type == TYP_STRUCT); return true; } #endif } return false; } /***************************************************************************** * Set the lvClass for a local variable of a struct type */ void Compiler::lvaSetStruct(unsigned varNum, CORINFO_CLASS_HANDLE typeHnd, bool unsafeValueClsCheck, bool setTypeInfo) { LclVarDsc* varDsc = lvaGetDesc(varNum); if (setTypeInfo) { varDsc->lvVerTypeInfo = typeInfo(TI_STRUCT, typeHnd); } // Set the type and associated info if we haven't already set it. if (varDsc->lvType == TYP_UNDEF) { varDsc->lvType = TYP_STRUCT; } if (varDsc->GetLayout() == nullptr) { ClassLayout* layout = typGetObjLayout(typeHnd); varDsc->SetLayout(layout); assert(varDsc->lvExactSize == 0); varDsc->lvExactSize = layout->GetSize(); assert(varDsc->lvExactSize != 0); if (layout->IsValueClass()) { CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; varDsc->lvType = impNormStructType(typeHnd, &simdBaseJitType); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Mark implicit byref struct parameters if (varDsc->lvIsParam && !varDsc->lvIsStructField) { structPassingKind howToReturnStruct; getArgTypeForStruct(typeHnd, &howToReturnStruct, this->info.compIsVarArgs, varDsc->lvExactSize); if (howToReturnStruct == SPK_ByReference) { JITDUMP("Marking V%02i as a byref parameter\n", varNum); varDsc->lvIsImplicitByRef = 1; } } #endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) #if FEATURE_SIMD if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(varTypeIsSIMD(varDsc)); varDsc->lvSIMDType = true; varDsc->SetSimdBaseJitType(simdBaseJitType); } #endif // FEATURE_SIMD if (GlobalJitOptions::compFeatureHfa) { // For structs that are small enough, we check and set HFA element type if (varDsc->lvExactSize <= MAX_PASS_MULTIREG_BYTES) { // hfaType is set to float, double or SIMD type if it is an HFA, otherwise TYP_UNDEF var_types hfaType = GetHfaType(typeHnd); if (varTypeIsValidHfaType(hfaType)) { varDsc->SetHfaType(hfaType); // hfa variables can never contain GC pointers assert(!layout->HasGCPtr()); // The size of this struct should be evenly divisible by 4 or 8 assert((varDsc->lvExactSize % genTypeSize(hfaType)) == 0); // The number of elements in the HFA should fit into our MAX_ARG_REG_COUNT limit assert((varDsc->lvExactSize / genTypeSize(hfaType)) <= MAX_ARG_REG_COUNT); } } } } } else { #if FEATURE_SIMD assert(!varTypeIsSIMD(varDsc) || (varDsc->GetSimdBaseType() != TYP_UNKNOWN)); #endif // FEATURE_SIMD ClassLayout* layout = typGetObjLayout(typeHnd); assert(ClassLayout::AreCompatible(varDsc->GetLayout(), 
layout)); // Inlining could replace a canon struct type with an exact one. varDsc->SetLayout(layout); assert(varDsc->lvExactSize != 0); } #ifndef TARGET_64BIT bool fDoubleAlignHint = false; #ifdef TARGET_X86 fDoubleAlignHint = true; #endif if (info.compCompHnd->getClassAlignmentRequirement(typeHnd, fDoubleAlignHint) == 8) { #ifdef DEBUG if (verbose) { printf("Marking struct in V%02i with double align flag\n", varNum); } #endif varDsc->lvStructDoubleAlign = 1; } #endif // not TARGET_64BIT unsigned classAttribs = info.compCompHnd->getClassAttribs(typeHnd); varDsc->lvOverlappingFields = StructHasOverlappingFields(classAttribs); // Check whether this local is an unsafe value type and requires GS cookie protection. // GS checks require the stack to be re-ordered, which can't be done with EnC. if (unsafeValueClsCheck && (classAttribs & CORINFO_FLG_UNSAFE_VALUECLASS) && !opts.compDbgEnC) { setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; varDsc->lvIsUnsafeBuffer = true; } #ifdef DEBUG if (JitConfig.EnableExtraSuperPmiQueries()) { makeExtraStructQueries(typeHnd, 2); } #endif // DEBUG } #ifdef DEBUG //------------------------------------------------------------------------ // makeExtraStructQueries: Query the information for the given struct handle. // // Arguments: // structHandle -- The handle for the struct type we're querying. // level -- How many more levels to recurse. // void Compiler::makeExtraStructQueries(CORINFO_CLASS_HANDLE structHandle, int level) { if (level <= 0) { return; } assert(structHandle != NO_CLASS_HANDLE); (void)typGetObjLayout(structHandle); DWORD typeFlags = info.compCompHnd->getClassAttribs(structHandle); if (StructHasDontDigFieldsFlagSet(typeFlags)) { // In AOT ReadyToRun compilation, don't query fields of types // outside of the current version bubble. return; } unsigned fieldCnt = info.compCompHnd->getClassNumInstanceFields(structHandle); impNormStructType(structHandle); #ifdef TARGET_ARMARCH GetHfaType(structHandle); #endif for (unsigned int i = 0; i < fieldCnt; i++) { CORINFO_FIELD_HANDLE fieldHandle = info.compCompHnd->getFieldInClass(structHandle, i); unsigned fldOffset = info.compCompHnd->getFieldOffset(fieldHandle); CORINFO_CLASS_HANDLE fieldClassHandle = NO_CLASS_HANDLE; CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHandle, &fieldClassHandle); var_types fieldVarType = JITtype2varType(fieldCorType); if (fieldClassHandle != NO_CLASS_HANDLE) { if (varTypeIsStruct(fieldVarType)) { makeExtraStructQueries(fieldClassHandle, level - 1); } } } } #endif // DEBUG //------------------------------------------------------------------------ // lvaSetStructUsedAsVarArg: update hfa information for vararg struct args // // Arguments: // varNum -- number of the variable // // Notes: // This only affects arm64 varargs on windows where we need to pass // hfa arguments as if they are not HFAs. // // This function should only be called if the struct is used in a varargs // method. void Compiler::lvaSetStructUsedAsVarArg(unsigned varNum) { if (GlobalJitOptions::compFeatureHfa && TargetOS::IsWindows) { #if defined(TARGET_ARM64) LclVarDsc* varDsc = lvaGetDesc(varNum); // For varargs methods incoming and outgoing arguments should not be treated // as HFA. varDsc->SetHfaType(TYP_UNDEF); #endif // defined(TARGET_ARM64) } } //------------------------------------------------------------------------ // lvaSetClass: set class information for a local var. 
//
// Arguments:
//    varNum -- number of the variable
//    clsHnd -- class handle to use in set or update
//    isExact -- true if class is known exactly
//
// Notes:
//    varNum must not already have a ref class handle.

void Compiler::lvaSetClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact)
{
    noway_assert(varNum < lvaCount);

    // If we are just importing, we cannot reliably track local ref types,
    // since the jit maps CORINFO_TYPE_VAR to TYP_REF.
    if (compIsForImportOnly())
    {
        return;
    }

    // Else we should have a type handle.
    assert(clsHnd != nullptr);

    LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvType == TYP_REF);

    // We should not have any ref type information for this var.
    assert(varDsc->lvClassHnd == NO_CLASS_HANDLE);
    assert(!varDsc->lvClassIsExact);

    JITDUMP("\nlvaSetClass: setting class for V%02i to (%p) %s %s\n", varNum, dspPtr(clsHnd),
            info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : "");

    varDsc->lvClassHnd     = clsHnd;
    varDsc->lvClassIsExact = isExact;
}

//------------------------------------------------------------------------
// lvaSetClass: set class information for a local var from a tree or stack type
//
// Arguments:
//    varNum -- number of the variable. Must be a single def local
//    tree  -- tree establishing the variable's value
//    stackHnd -- handle for the type from the evaluation stack
//
// Notes:
//    Preferentially uses the tree's type, when available. Since not all
//    tree kinds can track ref types, the stack type is used as a
//    fallback. If there is no stack type, then the class is set to object.

void Compiler::lvaSetClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd)
{
    bool                 isExact   = false;
    bool                 isNonNull = false;
    CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);

    if (clsHnd != nullptr)
    {
        lvaSetClass(varNum, clsHnd, isExact);
    }
    else if (stackHnd != nullptr)
    {
        lvaSetClass(varNum, stackHnd);
    }
    else
    {
        lvaSetClass(varNum, impGetObjectClass());
    }
}

//------------------------------------------------------------------------
// lvaUpdateClass: update class information for a local var.
//
// Arguments:
//    varNum -- number of the variable
//    clsHnd -- class handle to use in set or update
//    isExact -- true if class is known exactly
//
// Notes:
//
//    This method models the type update rule for an assignment.
//
//    Updates currently should only happen for single-def user args or
//    locals, when we are processing the expression actually being
//    used to initialize the local (or inlined arg). The update will
//    change the local from the declared type to the type of the
//    initial value.
//
//    These updates should always *improve* what we know about the
//    type, that is making an inexact type exact, or changing a type
//    to some subtype. However the jit lacks precise type information
//    for shared code, so ensuring this is so is currently not
//    possible.

void Compiler::lvaUpdateClass(unsigned varNum, CORINFO_CLASS_HANDLE clsHnd, bool isExact)
{
    assert(varNum < lvaCount);

    // If we are just importing, we cannot reliably track local ref types,
    // since the jit maps CORINFO_TYPE_VAR to TYP_REF.
    if (compIsForImportOnly())
    {
        return;
    }

    // Else we should have a class handle to consider
    assert(clsHnd != nullptr);

    LclVarDsc* varDsc = lvaGetDesc(varNum);
    assert(varDsc->lvType == TYP_REF);

    // We should already have a class
    assert(varDsc->lvClassHnd != NO_CLASS_HANDLE);

    // We should only be updating classes for single-def locals.
    assert(varDsc->lvSingleDef);

    // Now see if we should update.
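    // (For example, updating an inexact 'object' to an exact 'string' is an improvement;
    // replacing an already-exact class is not allowed.)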
    //
    // New information may not always be "better" so do some
    // simple analysis to decide if the update is worthwhile.
    const bool isNewClass   = (clsHnd != varDsc->lvClassHnd);
    bool       shouldUpdate = false;

    // Are we attempting to update the class? Only check this when we have
    // a new type and the existing class is inexact... we should not be
    // updating exact classes.
    if (!varDsc->lvClassIsExact && isNewClass)
    {
        shouldUpdate = !!info.compCompHnd->isMoreSpecificType(varDsc->lvClassHnd, clsHnd);
    }
    // Else are we attempting to update exactness?
    else if (isExact && !varDsc->lvClassIsExact && !isNewClass)
    {
        shouldUpdate = true;
    }

#if DEBUG
    if (isNewClass || (isExact != varDsc->lvClassIsExact))
    {
        JITDUMP("\nlvaUpdateClass:%s Updating class for V%02u", shouldUpdate ? "" : " NOT", varNum);
        JITDUMP(" from (%p) %s%s", dspPtr(varDsc->lvClassHnd), info.compCompHnd->getClassName(varDsc->lvClassHnd),
                varDsc->lvClassIsExact ? " [exact]" : "");
        JITDUMP(" to (%p) %s%s\n", dspPtr(clsHnd), info.compCompHnd->getClassName(clsHnd), isExact ? " [exact]" : "");
    }
#endif // DEBUG

    if (shouldUpdate)
    {
        varDsc->lvClassHnd     = clsHnd;
        varDsc->lvClassIsExact = isExact;

#if DEBUG
        // Note we've modified the type...
        varDsc->lvClassInfoUpdated = true;
#endif // DEBUG
    }

    return;
}

//------------------------------------------------------------------------
// lvaUpdateClass: Update class information for a local var from a tree
//  or stack type
//
// Arguments:
//    varNum -- number of the variable. Must be a single def local
//    tree  -- tree establishing the variable's value
//    stackHnd -- handle for the type from the evaluation stack
//
// Notes:
//    Preferentially uses the tree's type, when available. Since not all
//    tree kinds can track ref types, the stack type is used as a
//    fallback.

void Compiler::lvaUpdateClass(unsigned varNum, GenTree* tree, CORINFO_CLASS_HANDLE stackHnd)
{
    bool                 isExact   = false;
    bool                 isNonNull = false;
    CORINFO_CLASS_HANDLE clsHnd    = gtGetClassHandle(tree, &isExact, &isNonNull);

    if (clsHnd != nullptr)
    {
        lvaUpdateClass(varNum, clsHnd, isExact);
    }
    else if (stackHnd != nullptr)
    {
        lvaUpdateClass(varNum, stackHnd);
    }
}

//------------------------------------------------------------------------
// lvaLclSize: returns size of a local variable, in bytes
//
// Arguments:
//    varNum -- variable to query
//
// Returns:
//    Number of bytes needed on the frame for such a local.

unsigned Compiler::lvaLclSize(unsigned varNum)
{
    assert(varNum < lvaCount);

    var_types varType = lvaTable[varNum].TypeGet();

    switch (varType)
    {
        case TYP_STRUCT:
        case TYP_BLK:
            return lvaTable[varNum].lvSize();

        case TYP_LCLBLK:
#if FEATURE_FIXED_OUT_ARGS
            // Note that this operation performs a read of a PhasedVar
            noway_assert(varNum == lvaOutgoingArgSpaceVar);
            return lvaOutgoingArgSpaceSize;
#else // FEATURE_FIXED_OUT_ARGS
            assert(!"Unknown size");
            NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS

        default: // This must be a primitive var. Fall out of switch statement
            break;
    }
#ifdef TARGET_64BIT
    // We only need this Quirk for TARGET_64BIT
    if (lvaTable[varNum].lvQuirkToLong)
    {
        noway_assert(lvaTable[varNum].IsAddressExposed());
        return genTypeStSz(TYP_LONG) * sizeof(int); // return 8  (2 * 4)
    }
#endif
    return genTypeStSz(varType) * sizeof(int);
}

//
// Return the exact width of local variable "varNum" -- the number of bytes
// you'd need to copy in order to overwrite the value.
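// (Unlike lvaLclSize, no rounding is applied: e.g. a 12-byte struct reports 12 here, while
// lvaLclSize would round it up to 16 on a 64-bit target.)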
//
unsigned Compiler::lvaLclExactSize(unsigned varNum)
{
    assert(varNum < lvaCount);

    var_types varType = lvaTable[varNum].TypeGet();

    switch (varType)
    {
        case TYP_STRUCT:
        case TYP_BLK:
            return lvaTable[varNum].lvExactSize;

        case TYP_LCLBLK:
#if FEATURE_FIXED_OUT_ARGS
            // Note that this operation performs a read of a PhasedVar
            noway_assert(lvaOutgoingArgSpaceSize >= 0);
            noway_assert(varNum == lvaOutgoingArgSpaceVar);
            return lvaOutgoingArgSpaceSize;
#else // FEATURE_FIXED_OUT_ARGS
            assert(!"Unknown size");
            NO_WAY("Target doesn't support TYP_LCLBLK");
#endif // FEATURE_FIXED_OUT_ARGS

        default: // This must be a primitive var. Fall out of switch statement
            break;
    }

    return genTypeSize(varType);
}

// getCalledCount -- get the value used to normalize weights for this method.
// If we don't have profile data then getCalledCount will return BB_UNITY_WEIGHT (100);
// otherwise it returns the number of times that profile data says the method was called.
//
// static
weight_t BasicBlock::getCalledCount(Compiler* comp)
{
    // when we don't have profile data then fgCalledCount will be BB_UNITY_WEIGHT (100)
    weight_t calledCount = comp->fgCalledCount;

    // If we haven't yet reached the place where we setup fgCalledCount it could still be zero,
    // so return a reasonable value to use until we set it.
    //
    if (calledCount == 0)
    {
        if (comp->fgIsUsingProfileWeights())
        {
            // When we use profile data block counts we have exact counts,
            // not multiples of BB_UNITY_WEIGHT (100)
            calledCount = 1;
        }
        else
        {
            calledCount = comp->fgFirstBB->bbWeight;

            if (calledCount == 0)
            {
                calledCount = BB_UNITY_WEIGHT;
            }
        }
    }
    return calledCount;
}

// getBBWeight -- get the normalized weight of this block
weight_t BasicBlock::getBBWeight(Compiler* comp)
{
    if (this->bbWeight == BB_ZERO_WEIGHT)
    {
        return BB_ZERO_WEIGHT;
    }
    else
    {
        weight_t calledCount = getCalledCount(comp);

        // Normalize the bbWeights by multiplying by BB_UNITY_WEIGHT and dividing by the calledCount.
        //
        weight_t fullResult = this->bbWeight * BB_UNITY_WEIGHT / calledCount;

        return fullResult;
    }
}

// LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for small code.
class LclVarDsc_SmallCode_Less
{
    const LclVarDsc* m_lvaTable;
    INDEBUG(unsigned m_lvaCount;)

public:
    LclVarDsc_SmallCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount))
        : m_lvaTable(lvaTable)
#ifdef DEBUG
        , m_lvaCount(lvaCount)
#endif
    {
    }

    bool operator()(unsigned n1, unsigned n2)
    {
        assert(n1 < m_lvaCount);
        assert(n2 < m_lvaCount);

        const LclVarDsc* dsc1 = &m_lvaTable[n1];
        const LclVarDsc* dsc2 = &m_lvaTable[n2];

        // We should not be sorting untracked variables
        assert(dsc1->lvTracked);
        assert(dsc2->lvTracked);
        // We should not be sorting after registers have been allocated
        assert(!dsc1->lvRegister);
        assert(!dsc2->lvRegister);

        unsigned weight1 = dsc1->lvRefCnt();
        unsigned weight2 = dsc2->lvRefCnt();

#ifndef TARGET_ARM
        // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from
        // legacy backend. It should be enabled and verified.

        // Force integer candidates to sort above float candidates.
        const bool isFloat1 = isFloatRegType(dsc1->lvType);
        const bool isFloat2 = isFloatRegType(dsc2->lvType);

        if (isFloat1 != isFloat2)
        {
            if ((weight2 != 0) && isFloat1)
            {
                return false;
            }

            if ((weight1 != 0) && isFloat2)
            {
                return true;
            }
        }
#endif

        if (weight1 != weight2)
        {
            return weight1 > weight2;
        }

        // If the weighted ref counts are different then use their difference.
if (dsc1->lvRefCntWtd() != dsc2->lvRefCntWtd()) { return dsc1->lvRefCntWtd() > dsc2->lvRefCntWtd(); } // We have equal ref counts and weighted ref counts. // Break the tie by: // - Increasing the weight by 2 if we are a register arg. // - Increasing the weight by 0.5 if we are a GC type. // // Review: seems odd that this is mixing counts and weights. if (weight1 != 0) { if (dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc1->TypeGet())) { weight1 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight2 != 0) { if (dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT_UNSIGNED; } if (varTypeIsGC(dsc2->TypeGet())) { weight2 += BB_UNITY_WEIGHT_UNSIGNED / 2; } } if (weight1 != weight2) { return weight1 > weight2; } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; // LclVarDsc "less" comparer used to compare the weight of two locals, when optimizing for blended code. class LclVarDsc_BlendedCode_Less { const LclVarDsc* m_lvaTable; INDEBUG(unsigned m_lvaCount;) public: LclVarDsc_BlendedCode_Less(const LclVarDsc* lvaTable DEBUGARG(unsigned lvaCount)) : m_lvaTable(lvaTable) #ifdef DEBUG , m_lvaCount(lvaCount) #endif { } bool operator()(unsigned n1, unsigned n2) { assert(n1 < m_lvaCount); assert(n2 < m_lvaCount); const LclVarDsc* dsc1 = &m_lvaTable[n1]; const LclVarDsc* dsc2 = &m_lvaTable[n2]; // We should not be sorting untracked variables assert(dsc1->lvTracked); assert(dsc2->lvTracked); // We should not be sorting after registers have been allocated assert(!dsc1->lvRegister); assert(!dsc2->lvRegister); weight_t weight1 = dsc1->lvRefCntWtd(); weight_t weight2 = dsc2->lvRefCntWtd(); #ifndef TARGET_ARM // ARM-TODO: this was disabled for ARM under !FEATURE_FP_REGALLOC; it was probably a left-over from // legacy backend. It should be enabled and verified. // Force integer candidates to sort above float candidates. const bool isFloat1 = isFloatRegType(dsc1->lvType); const bool isFloat2 = isFloatRegType(dsc2->lvType); if (isFloat1 != isFloat2) { if (!Compiler::fgProfileWeightsEqual(weight2, 0) && isFloat1) { return false; } if (!Compiler::fgProfileWeightsEqual(weight1, 0) && isFloat2) { return true; } } #endif if (!Compiler::fgProfileWeightsEqual(weight1, 0) && dsc1->lvIsRegArg) { weight1 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight2, 0) && dsc2->lvIsRegArg) { weight2 += 2 * BB_UNITY_WEIGHT; } if (!Compiler::fgProfileWeightsEqual(weight1, weight2)) { return weight1 > weight2; } // If the weighted ref counts are different then try the unweighted ref counts. if (dsc1->lvRefCnt() != dsc2->lvRefCnt()) { return dsc1->lvRefCnt() > dsc2->lvRefCnt(); } // If one is a GC type and the other is not the GC type wins. if (varTypeIsGC(dsc1->TypeGet()) != varTypeIsGC(dsc2->TypeGet())) { return varTypeIsGC(dsc1->TypeGet()); } // To achieve a stable sort we use the LclNum (by way of the pointer address). return dsc1 < dsc2; } }; /***************************************************************************** * * Sort the local variable table by refcount and assign tracking indices. 
 */
void Compiler::lvaSortByRefCount()
{
    lvaTrackedCount             = 0;
    lvaTrackedCountInSizeTUnits = 0;

#ifdef DEBUG
    VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeEmpty(this));
#endif

    if (lvaCount == 0)
    {
        return;
    }

    /* We'll sort the variables by ref count - allocate the sorted table */

    if (lvaTrackedToVarNumSize < lvaCount)
    {
        lvaTrackedToVarNumSize = lvaCount;
        lvaTrackedToVarNum     = new (getAllocator(CMK_LvaTable)) unsigned[lvaTrackedToVarNumSize];
    }

    unsigned  trackedCount = 0;
    unsigned* tracked      = lvaTrackedToVarNum;

    // Fill in the table used for sorting

    for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
    {
        LclVarDsc* varDsc = lvaGetDesc(lclNum);

        // Start by assuming that the variable will be tracked.
        varDsc->lvTracked = 1;

        if (varDsc->lvRefCnt() == 0)
        {
            // Zero ref count, make this untracked.
            varDsc->lvTracked = 0;
            varDsc->setLvRefCntWtd(0);
        }

#if !defined(TARGET_64BIT)
        if (varTypeIsLong(varDsc) && varDsc->lvPromoted)
        {
            varDsc->lvTracked = 0;
        }
#endif // !defined(TARGET_64BIT)

        // Variables that are address-exposed, and all struct locals, are never enregistered or tracked.
        // (The struct may be promoted, and its field variables enregistered/tracked, or the VM may "normalize"
        // its type so that it's not seen by the JIT as a struct.)
        // Pinned variables may not be tracked (a condition of the GCInfo representation)
        // or enregistered, on x86 -- it is believed that we can enregister pinned (more properly, "pinning")
        // references when using the general GC encoding.
        if (varDsc->IsAddressExposed())
        {
            varDsc->lvTracked = 0;
            assert(varDsc->lvType != TYP_STRUCT ||
                   varDsc->lvDoNotEnregister); // For structs, should have set this when we set m_addrExposed.
        }
        if (varTypeIsStruct(varDsc))
        {
            // Promoted structs will never be considered for enregistration anyway,
            // and the DoNotEnregister flag was used to indicate whether promotion was
            // independent or dependent.
            if (varDsc->lvPromoted)
            {
                varDsc->lvTracked = 0;
            }
            else if (!varDsc->IsEnregisterableType())
            {
                lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NotRegSizeStruct));
            }
            else if (varDsc->lvType == TYP_STRUCT)
            {
                if (!varDsc->lvRegStruct && !compEnregStructLocals())
                {
                    lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DontEnregStructs));
                }
                else if (varDsc->lvIsMultiRegArgOrRet())
                {
                    // Prolog and return generators do not support SIMD<->general register moves.
                    lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg));
                }
#if defined(TARGET_ARM)
                else if (varDsc->lvIsParam)
                {
                    // On arm we prespill all struct args,
                    // TODO-Arm-CQ: keep them in registers, it will need a fix
                    // to "On the ARM we will spill any incoming struct args" logic in codegencommon.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::IsStructArg)); } #endif // TARGET_ARM } } if (varDsc->lvIsStructField && (lvaGetParentPromotionType(lclNum) != PROMOTION_TYPE_INDEPENDENT)) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::DepField)); } if (varDsc->lvPinned) { varDsc->lvTracked = 0; #ifdef JIT32_GCENCODER lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::PinningRef)); #endif } if (opts.MinOpts() && !JitConfig.JitMinOptsTrackGCrefs() && varTypeIsGC(varDsc->TypeGet())) { varDsc->lvTracked = 0; lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::MinOptsGC)); } if (!compEnregLocals()) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::NoRegVars)); } #if defined(JIT32_GCENCODER) && defined(FEATURE_EH_FUNCLETS) if (lvaIsOriginalThisArg(lclNum) && (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_FROM_THIS) != 0) { // For x86/Linux, we need to track "this". // However we cannot have it in tracked variables, so we set "this" pointer always untracked varDsc->lvTracked = 0; } #endif // Are we not optimizing and we have exception handlers? // if so mark all args and locals "do not enregister". // if (opts.MinOpts() && compHndBBtabCount > 0) { lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LiveInOutOfHandler)); } else { var_types type = genActualType(varDsc->TypeGet()); switch (type) { case TYP_FLOAT: case TYP_DOUBLE: case TYP_INT: case TYP_LONG: case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD case TYP_STRUCT: break; case TYP_UNDEF: case TYP_UNKNOWN: noway_assert(!"lvType not set correctly"); varDsc->lvType = TYP_INT; FALLTHROUGH; default: varDsc->lvTracked = 0; } } if (varDsc->lvTracked) { tracked[trackedCount++] = lclNum; } } // Now sort the tracked variable table by ref-count if (compCodeOpt() == SMALL_CODE) { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_SmallCode_Less(lvaTable DEBUGARG(lvaCount))); } else { jitstd::sort(tracked, tracked + trackedCount, LclVarDsc_BlendedCode_Less(lvaTable DEBUGARG(lvaCount))); } lvaTrackedCount = min((unsigned)JitConfig.JitMaxLocalsToTrack(), trackedCount); JITDUMP("Tracked variable (%u out of %u) table:\n", lvaTrackedCount, lvaCount); // Assign indices to all the variables we've decided to track for (unsigned varIndex = 0; varIndex < lvaTrackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvVarIndex = static_cast<unsigned short>(varIndex); INDEBUG(if (verbose) { gtDispLclVar(tracked[varIndex]); }) JITDUMP(" [%6s]: refCnt = %4u, refCntWtd = %6s\n", varTypeName(varDsc->TypeGet()), varDsc->lvRefCnt(), refCntWtd2str(varDsc->lvRefCntWtd())); } JITDUMP("\n"); // Mark all variables past the first 'lclMAX_TRACKED' as untracked for (unsigned varIndex = lvaTrackedCount; varIndex < trackedCount; varIndex++) { LclVarDsc* varDsc = lvaGetDesc(tracked[varIndex]); assert(varDsc->lvTracked); varDsc->lvTracked = 0; } // We have a new epoch, and also cache the tracked var count in terms of size_t's sufficient to hold that many bits. 
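    // For example, 70 tracked variables on a 64-bit target round up to 2 size_t units (128 bits).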
    lvaCurEpoch++;
    lvaTrackedCountInSizeTUnits =
        roundUp((unsigned)lvaTrackedCount, (unsigned)(sizeof(size_t) * 8)) / unsigned(sizeof(size_t) * 8);

#ifdef DEBUG
    VarSetOps::AssignNoCopy(this, lvaTrackedVars, VarSetOps::MakeFull(this));
#endif
}

/*****************************************************************************
 *
 *  This is called by lvaMarkLclRefs to disqualify a variable from being
 *  considered by optAddCopies()
 */
void LclVarDsc::lvaDisqualifyVar()
{
    this->lvDisqualify = true;
    this->lvSingleDef  = false;
    this->lvDefStmt    = nullptr;
}

#ifdef FEATURE_SIMD
var_types LclVarDsc::GetSimdBaseType() const
{
    CorInfoType simdBaseJitType = GetSimdBaseJitType();

    if (simdBaseJitType == CORINFO_TYPE_UNDEF)
    {
        return TYP_UNKNOWN;
    }
    return JitType2PreciseVarType(simdBaseJitType);
}
#endif // FEATURE_SIMD

unsigned LclVarDsc::lvSize() const // Size needed for storage representation. Only used for structs or TYP_BLK.
{
    // TODO-Review: Sometimes we get called on ARM with HFA struct variables that have been promoted,
    // where the struct itself is no longer used because all access is via its member fields.
    // When that happens, the struct is marked as unused and its type has been changed to
    // TYP_INT (to keep the GC tracking code from looking at it).
    // See Compiler::raAssignVars() for details. For example:
    //      N002 (  4,  3) [00EA067C] -------------    return    struct $346
    //      N001 (  3,  2) [00EA0628] -------------       lclVar    struct(U) V03 loc2
    //                                                    float  V03.f1 (offs=0x00) -> V12 tmp7
    //                                                    f8 (last use) (last use) $345
    // Here, the "struct(U)" shows that the "V03 loc2" variable is unused. Not shown is that V03
    // is now TYP_INT in the local variable table. It's not really unused, because it's in the tree.

    assert(varTypeIsStruct(lvType) || (lvType == TYP_BLK) || (lvPromoted && lvUnusedStruct));

    if (lvIsParam)
    {
        assert(varTypeIsStruct(lvType));
        const bool     isFloatHfa       = (lvIsHfa() && (GetHfaType() == TYP_FLOAT));
        const unsigned argSizeAlignment = Compiler::eeGetArgSizeAlignment(lvType, isFloatHfa);
        return roundUp(lvExactSize, argSizeAlignment);
    }

#if defined(FEATURE_SIMD) && !defined(TARGET_64BIT)
    // For 32-bit architectures, we make local variable SIMD12 types 16 bytes instead of just 12. We can't do
    // this for arguments, which must be passed according to the defined ABI. We don't want to do this for
    // dependently promoted struct fields, but we don't know that here. See lvaMapSimd12ToSimd16().
    // (Note that for 64-bits, we are already rounding up to 16.)
    if (lvType == TYP_SIMD12)
    {
        assert(!lvIsParam);
        assert(lvExactSize == 12);
        return 16;
    }
#endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT)

    return roundUp(lvExactSize, TARGET_POINTER_SIZE);
}

/**********************************************************************************
 * Get stack size of the varDsc.
 */
size_t LclVarDsc::lvArgStackSize() const
{
    // Make sure this will have a stack size
    assert(!this->lvIsRegArg);

    size_t stackSize = 0;
    if (varTypeIsStruct(this))
    {
#if defined(WINDOWS_AMD64_ABI)
        // Structs are either passed by reference or can be passed by value using one pointer
        stackSize = TARGET_POINTER_SIZE;
#elif defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
        // lvSize performs a roundup.
        stackSize = this->lvSize();

#if defined(TARGET_ARM64)
        if ((stackSize > TARGET_POINTER_SIZE * 2) && (!this->lvIsHfa()))
        {
            // If the size is greater than 16 bytes then it will
            // be passed by reference.
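            // In that case only a pointer-sized slot is consumed on the caller's stack.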
stackSize = TARGET_POINTER_SIZE; } #endif // defined(TARGET_ARM64) #else // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI NYI("Unsupported target."); unreached(); #endif // !TARGET_ARM64 !WINDOWS_AMD64_ABI !UNIX_AMD64_ABI } else { stackSize = TARGET_POINTER_SIZE; } return stackSize; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Arguments: // tree - node that uses the local, its type is checked first. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType(const GenTreeLclVarCommon* tree) const { var_types targetType = tree->gtType; var_types lclVarType = TypeGet(); if (targetType == TYP_STRUCT) { if (lclVarType == TYP_STRUCT) { assert(!tree->OperIsLocalField() && "do not expect struct local fields."); lclVarType = GetLayout()->GetRegisterType(); } targetType = lclVarType; } #ifdef DEBUG if ((targetType != TYP_UNDEF) && tree->OperIs(GT_STORE_LCL_VAR) && lvNormalizeOnStore()) { const bool phiStore = (tree->gtGetOp1()->OperIsNonPhiLocal() == false); // Ensure that the lclVar node is typed correctly, // does not apply to phi-stores because they do not produce code in the merge block. assert(phiStore || targetType == genActualType(lclVarType)); } #endif return targetType; } //------------------------------------------------------------------------ // GetRegisterType: Determine register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetRegisterType() const { if (TypeGet() != TYP_STRUCT) { #if !defined(TARGET_64BIT) if (TypeGet() == TYP_LONG) { return TYP_UNDEF; } #endif return TypeGet(); } assert(m_layout != nullptr); return m_layout->GetRegisterType(); } //------------------------------------------------------------------------ // GetActualRegisterType: Determine an actual register type for this local var. // // Return Value: // TYP_UNDEF if the layout is not enregistrable, the register type otherwise. // var_types LclVarDsc::GetActualRegisterType() const { return genActualType(GetRegisterType()); } //---------------------------------------------------------------------------------------------- // CanBeReplacedWithItsField: check if a whole struct reference could be replaced by a field. // // Arguments: // comp - the compiler instance; // // Return Value: // true if that can be replaced, false otherwise. // // Notes: // The replacement can be made only for independently promoted structs // with 1 field without holes. // bool LclVarDsc::CanBeReplacedWithItsField(Compiler* comp) const { if (!lvPromoted) { return false; } if (comp->lvaGetPromotionType(this) != Compiler::PROMOTION_TYPE_INDEPENDENT) { return false; } if (lvFieldCnt != 1) { return false; } if (lvContainsHoles) { return false; } #if defined(FEATURE_SIMD) // If we return `struct A { SIMD16 a; }` we split the struct into several fields. // In order to do that we have to have its field `a` in memory. Right now lowering cannot // handle RETURN struct(multiple registers)->SIMD16(one register), but it can be improved. 
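    // Reject the replacement when the single field is a SIMD type.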
    LclVarDsc* fieldDsc = comp->lvaGetDesc(lvFieldLclStart);
    if (varTypeIsSIMD(fieldDsc))
    {
        return false;
    }
#endif // FEATURE_SIMD

    return true;
}

//------------------------------------------------------------------------
// lvaMarkLclRefs: increment local var references counts and more
//
// Arguments:
//     tree - some node in a tree
//     block - block that the tree node belongs to
//     stmt - stmt that the tree node belongs to
//     isRecompute - true if we should just recompute counts
//
// Notes:
//     Invoked via the MarkLocalVarsVisitor
//
//     Primarily increments the regular and weighted local var ref
//     counts for any local referred to directly by tree.
//
//     Also:
//
//     Accounts for implicit references to frame list root for
//     pinvokes that will be expanded later.
//
//     Determines if locals of TYP_BOOL can safely be considered
//     to hold only 0 or 1 or may have a broader range of true values.
//
//     Does some setup work for assertion prop, noting locals that are
//     eligible for assertion prop, single defs, and tracking which blocks
//     hold uses.
//
//     Looks for uses of generic context and sets lvaGenericsContextInUse.
//
//     In checked builds:
//
//     Verifies that local accesses are consistently typed.
//     Verifies that casts remain in bounds.

void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, bool isRecompute)
{
    const weight_t weight = block->getBBWeight(this);

    /* Is this a call to unmanaged code ? */
    if (tree->IsCall() && compMethodRequiresPInvokeFrame())
    {
        assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM));
        if (!opts.ShouldUsePInvokeHelpers())
        {
            /* Get the special variable descriptor */
            LclVarDsc* varDsc = lvaGetDesc(info.compLvFrameListRoot);

            /* Increment the ref counts twice */
            varDsc->incRefCnts(weight, this);
            varDsc->incRefCnts(weight, this);
        }
    }

    if (!isRecompute)
    {
        /* Is this an assignment? */
        if (tree->OperIs(GT_ASG))
        {
            GenTree* op1 = tree->AsOp()->gtOp1;
            GenTree* op2 = tree->AsOp()->gtOp2;

            /* Is this an assignment to a local variable? */
            if (op1->gtOper == GT_LCL_VAR && op2->gtType != TYP_BOOL)
            {
                /* Only simple assignments allowed for booleans */
                if (tree->gtOper != GT_ASG)
                {
                    goto NOT_BOOL;
                }

                /* Is the RHS clearly a boolean value? */
                switch (op2->gtOper)
                {
                    unsigned lclNum;

                    case GT_CNS_INT:

                        if (op2->AsIntCon()->gtIconVal == 0)
                        {
                            break;
                        }
                        if (op2->AsIntCon()->gtIconVal == 1)
                        {
                            break;
                        }

                        // Not 0 or 1, fall through ....
                        FALLTHROUGH;

                    default:

                        if (op2->OperIsCompare())
                        {
                            break;
                        }

                    NOT_BOOL:

                        lclNum = op1->AsLclVarCommon()->GetLclNum();
                        noway_assert(lclNum < lvaCount);

                        lvaTable[lclNum].lvIsBoolean = false;
                        break;
                }
            }
        }
    }

    if (tree->OperIsLocalAddr())
    {
        LclVarDsc* varDsc = lvaGetDesc(tree->AsLclVarCommon());
        assert(varDsc->IsAddressExposed());
        varDsc->incRefCnts(weight, this);
        return;
    }

    if ((tree->gtOper != GT_LCL_VAR) && (tree->gtOper != GT_LCL_FLD))
    {
        return;
    }

    /* This must be a local variable reference */

    // See if this is a generics context use.
    if ((tree->gtFlags & GTF_VAR_CONTEXT) != 0)
    {
        assert(tree->OperIs(GT_LCL_VAR));
        if (!lvaGenericsContextInUse)
        {
            JITDUMP("-- generic context in use at [%06u]\n", dspTreeID(tree));
            lvaGenericsContextInUse = true;
        }
    }

    assert((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD));
    unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    /* Increment the reference counts */

    varDsc->incRefCnts(weight, this);

#ifdef DEBUG
    if (varDsc->lvIsStructField)
    {
        // If ref count was increased for struct field, ensure that the
        // parent struct is still promoted.
        LclVarDsc* parentStruct = lvaGetDesc(varDsc->lvParentLcl);
        assert(!parentStruct->lvUndoneStructPromotion);
    }
#endif

    if (!isRecompute)
    {
        if (lvaVarAddrExposed(lclNum))
        {
            varDsc->lvIsBoolean = false;
        }

        if (tree->gtOper == GT_LCL_FLD)
        {
            // variables that have uses inside a GT_LCL_FLD
            // cause problems, so we will disqualify them here
            varDsc->lvaDisqualifyVar();
            return;
        }

        if (fgDomsComputed && IsDominatedByExceptionalEntry(block))
        {
            SetVolatileHint(varDsc);
        }

        /* Record if the variable has a single def or not */

        if (!varDsc->lvDisqualify) // If this variable is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                /*
                   If we have one of these cases:
                       1.    We have already seen a definition (i.e. lvSingleDef is true)
                       2. or info.CompInitMem is true (thus this would be the second definition)
                       3. or we have an assignment inside QMARK-COLON trees
                       4. or we have an update form of assignment (i.e. +=, -=, *=)
                   Then we must disqualify this variable for use in optAddCopies()

                   Note that all parameters start out with lvSingleDef set to true
                */
                if ((varDsc->lvSingleDef == true) || (info.compInitMem == true) || (tree->gtFlags & GTF_COLON_COND) ||
                    (tree->gtFlags & GTF_VAR_USEASG))
                {
                    varDsc->lvaDisqualifyVar();
                }
                else
                {
                    varDsc->lvSingleDef = true;
                    varDsc->lvDefStmt   = stmt;
                }
            }
            else // otherwise this is a ref of our variable
            {
                if (BlockSetOps::MayBeUninit(varDsc->lvRefBlks))
                {
                    // Lazy initialization
                    BlockSetOps::AssignNoCopy(this, varDsc->lvRefBlks, BlockSetOps::MakeEmpty(this));
                }
                BlockSetOps::AddElemD(this, varDsc->lvRefBlks, block->bbNum);
            }
        }

        if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this
        {
            if (tree->gtFlags & GTF_VAR_DEF) // Is this a def of our variable
            {
                bool bbInALoop  = (block->bbFlags & BBF_BACKWARD_JUMP) != 0;
                bool bbIsReturn = block->bbJumpKind == BBJ_RETURN;
                // TODO: Zero-inits in LSRA are created with below condition. But if we filter out based on that
                // condition we filter out a lot of interesting variables that would benefit otherwise with EH var
                // enregistration.
                // bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem ||
                // varTypeIsGC(varDsc->TypeGet()));
                bool needsExplicitZeroInit = fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn);

                if (varDsc->lvSingleDefRegCandidate || needsExplicitZeroInit)
                {
#ifdef DEBUG
                    if (needsExplicitZeroInit)
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'Z';
                        JITDUMP("V%02u needs explicit zero init. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }
                    else
                    {
                        varDsc->lvSingleDefDisqualifyReason = 'M';
                        JITDUMP("V%02u has multiple definitions. Disqualified as a single-def register candidate.\n",
                                lclNum);
                    }

#endif // DEBUG
                    varDsc->lvSingleDefRegCandidate           = false;
                    varDsc->lvDisqualifySingleDefRegCandidate = true;
                }
                else
                {
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
                    // TODO-CQ: If the varType needs partial callee save, conservatively do not enregister
                    // such variable. In future, need to enable enregistration for such variables.
                    if (!varTypeNeedsPartialCalleeSave(varDsc->GetRegisterType()))
#endif
                    {
                        varDsc->lvSingleDefRegCandidate = true;
                        JITDUMP("Marking EH Var V%02u as a register candidate.\n", lclNum);
                    }
                }
            }
        }

        bool allowStructs = false;
#ifdef UNIX_AMD64_ABI
        // On System V the type of the var could be a struct type.
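        // Allow the type-consistency check below to tolerate struct-typed references in that case.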
        allowStructs = varTypeIsStruct(varDsc);
#endif // UNIX_AMD64_ABI

        /* Variables must be used as the same type throughout the method */
        noway_assert(varDsc->lvType == TYP_UNDEF || tree->gtType == TYP_UNKNOWN || allowStructs ||
                     genActualType(varDsc->TypeGet()) == genActualType(tree->gtType) ||
                     (tree->gtType == TYP_BYREF && varDsc->TypeGet() == TYP_I_IMPL) ||
                     (tree->gtType == TYP_I_IMPL && varDsc->TypeGet() == TYP_BYREF) || (tree->gtFlags & GTF_VAR_CAST) ||
                     (varTypeIsFloating(varDsc) && varTypeIsFloating(tree)) ||
                     (varTypeIsStruct(varDsc) == varTypeIsStruct(tree)));

        /* Remember the type of the reference */

        if (tree->gtType == TYP_UNKNOWN || varDsc->lvType == TYP_UNDEF)
        {
            varDsc->lvType = tree->gtType;
            noway_assert(genActualType(varDsc->TypeGet()) == tree->gtType); // no truncation
        }

#ifdef DEBUG
        if (tree->gtFlags & GTF_VAR_CAST)
        {
            // it should never be bigger than the variable slot

            // Trees don't store the full information about structs
            // so we can't check them.
            if (tree->TypeGet() != TYP_STRUCT)
            {
                unsigned treeSize = genTypeSize(tree->TypeGet());
                unsigned varSize  = genTypeSize(varDsc->TypeGet());
                if (varDsc->TypeGet() == TYP_STRUCT)
                {
                    varSize = varDsc->lvSize();
                }

                assert(treeSize <= varSize);
            }
        }
#endif
    }
}

//------------------------------------------------------------------------
// IsDominatedByExceptionalEntry: Check if the block is dominated by an exception entry block.
//
// Arguments:
//    block - the block to check.
//
bool Compiler::IsDominatedByExceptionalEntry(BasicBlock* block)
{
    assert(fgDomsComputed);
    return block->IsDominatedByExceptionalEntryFlag();
}

//------------------------------------------------------------------------
// SetVolatileHint: Set a local var's volatile hint.
//
// Arguments:
//    varDsc - the local variable that needs the hint.
//
void Compiler::SetVolatileHint(LclVarDsc* varDsc)
{
    varDsc->lvVolatileHint = true;
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: update local var ref counts for IR in a basic block
//
// Arguments:
//    block - the block in question
//    isRecompute - true if counts are being recomputed
//
// Notes:
//    Invokes lvaMarkLclRefs on each tree node for each
//    statement in the block.
//
void Compiler::lvaMarkLocalVars(BasicBlock* block, bool isRecompute)
{
    class MarkLocalVarsVisitor final : public GenTreeVisitor<MarkLocalVarsVisitor>
    {
    private:
        BasicBlock* m_block;
        Statement*  m_stmt;
        bool        m_isRecompute;

    public:
        enum
        {
            DoPreOrder = true,
        };

        MarkLocalVarsVisitor(Compiler* compiler, BasicBlock* block, Statement* stmt, bool isRecompute)
            : GenTreeVisitor<MarkLocalVarsVisitor>(compiler), m_block(block), m_stmt(stmt), m_isRecompute(isRecompute)
        {
        }

        Compiler::fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
        {
            // TODO: Stop passing isRecompute once we are sure that this assert is never hit.
            assert(!m_isRecompute);
            m_compiler->lvaMarkLclRefs(*use, m_block, m_stmt, m_isRecompute);
            return WALK_CONTINUE;
        }
    };

    JITDUMP("\n*** %s local variables in block " FMT_BB " (weight=%s)\n", isRecompute ? "recomputing" : "marking",
            block->bbNum, refCntWtd2str(block->getBBWeight(this)));

    for (Statement* const stmt : block->NonPhiStatements())
    {
        MarkLocalVarsVisitor visitor(this, block, stmt, isRecompute);
        DISPSTMT(stmt);
        visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
    }
}

//------------------------------------------------------------------------
// lvaMarkLocalVars: enable normal ref counting, compute initial counts, sort locals table
//
// Notes:
//    Now behaves differently in minopts / debug.
Instead of actually inspecting // the IR and counting references, the jit assumes all locals are referenced // and does not sort the locals table. // // Also, when optimizing, lays the groundwork for assertion prop and more. // See details in lvaMarkLclRefs. void Compiler::lvaMarkLocalVars() { JITDUMP("\n*************** In lvaMarkLocalVars()"); // If we have direct pinvokes, verify the frame list root local was set up properly if (compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) { noway_assert(info.compLvFrameListRoot >= info.compLocalsCount && info.compLvFrameListRoot < lvaCount); } } #if !defined(FEATURE_EH_FUNCLETS) // Grab space for exception handling if (ehNeedsShadowSPslots()) { // The first slot is reserved for ICodeManager::FixContext(ppEndRegion) // ie. the offset of the end-of-last-executed-filter unsigned slotsNeeded = 1; unsigned handlerNestingLevel = ehMaxHndNestingCount; if (opts.compDbgEnC && (handlerNestingLevel < (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL)) handlerNestingLevel = (unsigned)MAX_EnC_HANDLER_NESTING_LEVEL; slotsNeeded += handlerNestingLevel; // For a filter (which can be active at the same time as a catch/finally handler) slotsNeeded++; // For zero-termination of the shadow-Stack-pointer chain slotsNeeded++; lvaShadowSPslotsVar = lvaGrabTempWithImplicitUse(false DEBUGARG("lvaShadowSPslotsVar")); LclVarDsc* shadowSPslotsVar = lvaGetDesc(lvaShadowSPslotsVar); shadowSPslotsVar->lvType = TYP_BLK; shadowSPslotsVar->lvExactSize = (slotsNeeded * TARGET_POINTER_SIZE); } #endif // !FEATURE_EH_FUNCLETS // PSPSym and LocAllocSPvar are not used by the CoreRT ABI if (!IsTargetAbi(CORINFO_CORERT_ABI)) { #if defined(FEATURE_EH_FUNCLETS) if (ehNeedsPSPSym()) { lvaPSPSym = lvaGrabTempWithImplicitUse(false DEBUGARG("PSPSym")); LclVarDsc* lclPSPSym = lvaGetDesc(lvaPSPSym); lclPSPSym->lvType = TYP_I_IMPL; lvaSetVarDoNotEnregister(lvaPSPSym DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER // LocAllocSPvar is only required by the implicit frame layout expected by the VM on x86. Whether // a function contains a Localloc is conveyed in the GC information, in the InfoHdrSmall.localloc // field. The function must have an EBP frame. Then, the VM finds the LocAllocSP slot by assuming // the following stack layout: // // -- higher addresses -- // saved EBP <-- EBP points here // other callee-saved registers // InfoHdrSmall.savedRegsCountExclFP specifies this size // optional GS cookie // InfoHdrSmall.security is 1 if this exists // LocAllocSP slot // -- lower addresses -- // // See also eetwain.cpp::GetLocallocSPOffset() and its callers. if (compLocallocUsed) { lvaLocAllocSPvar = lvaGrabTempWithImplicitUse(false DEBUGARG("LocAllocSPvar")); LclVarDsc* locAllocSPvar = lvaGetDesc(lvaLocAllocSPvar); locAllocSPvar->lvType = TYP_I_IMPL; } #endif // JIT32_GCENCODER } // Ref counting is now enabled normally. lvaRefCountState = RCS_NORMAL; #if defined(DEBUG) const bool setSlotNumbers = true; #else const bool setSlotNumbers = opts.compScopeInfo && (info.compVarScopesCount > 0); #endif // defined(DEBUG) const bool isRecompute = false; lvaComputeRefCounts(isRecompute, setSlotNumbers); // If we don't need precise reference counts, e.g. we're not optimizing, we're done. if (!PreciseRefCountsRequired()) { return; } const bool reportParamTypeArg = lvaReportParamTypeArg(); // Update bookkeeping on the generic context. 
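    // The generic context ('this' or the type context arg) can lack explicit IR references
    // but must still be reported to the runtime, so give it an implicit reference instead.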
if (lvaKeepAliveAndReportThis()) { lvaGetDesc(0u)->lvImplicitlyReferenced = reportParamTypeArg; } else if (lvaReportParamTypeArg()) { // We should have a context arg. assert(info.compTypeCtxtArg != (int)BAD_VAR_NUM); lvaGetDesc(info.compTypeCtxtArg)->lvImplicitlyReferenced = reportParamTypeArg; } assert(PreciseRefCountsRequired()); // Note: optAddCopies() depends on lvaRefBlks, which is set in lvaMarkLocalVars(BasicBlock*), called above. optAddCopies(); } //------------------------------------------------------------------------ // lvaComputeRefCounts: compute ref counts for locals // // Arguments: // isRecompute -- true if we just want ref counts and no other side effects; // false means to also look for true boolean locals, lay // groundwork for assertion prop, check type consistency, etc. // See lvaMarkLclRefs for details on what else goes on. // setSlotNumbers -- true if local slot numbers should be assigned. // // Notes: // Some implicit references are given actual counts or weight bumps here // to match pre-existing behavior. // // In fast-jitting modes where we don't ref count locals, this bypasses // actual counting, and makes all locals implicitly referenced on first // compute. It asserts all locals are implicitly referenced on recompute. // // When optimizing we also recompute lvaGenericsContextInUse based // on specially flagged LCL_VAR appearances. // void Compiler::lvaComputeRefCounts(bool isRecompute, bool setSlotNumbers) { JITDUMP("\n*** lvaComputeRefCounts ***\n"); unsigned lclNum = 0; LclVarDsc* varDsc = nullptr; // Fast path for minopts and debug codegen. // // On first compute: mark all locals as implicitly referenced and untracked. // On recompute: do nothing. if (!PreciseRefCountsRequired()) { if (isRecompute) { #if defined(DEBUG) // All local vars should be marked as implicitly referenced // and not tracked. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (isSpecialVarargsParam) { assert(varDsc->lvRefCnt() == 0); } else { assert(varDsc->lvImplicitlyReferenced); } assert(!varDsc->lvTracked); } #endif // defined (DEBUG) return; } // First compute. for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { // Using lvImplicitlyReferenced here ensures that we can't // accidentally make locals be unreferenced later by decrementing // the ref count to zero. // // If, in minopts/debug, we really want to allow locals to become // unreferenced later, we'll have to explicitly clear this bit. varDsc->setLvRefCnt(0); varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT); // Special case for some varargs params ... these must // remain unreferenced. const bool isSpecialVarargsParam = varDsc->lvIsParam && raIsVarargsStackArg(lclNum); if (!isSpecialVarargsParam) { varDsc->lvImplicitlyReferenced = 1; } varDsc->lvTracked = 0; if (setSlotNumbers) { varDsc->lvSlotNum = lclNum; } // Assert that it's ok to bypass the type repair logic in lvaMarkLclRefs assert((varDsc->lvType != TYP_UNDEF) && (varDsc->lvType != TYP_VOID) && (varDsc->lvType != TYP_UNKNOWN)); } lvaCurEpoch++; lvaTrackedCount = 0; lvaTrackedCountInSizeTUnits = 0; return; } // Slower path we take when optimizing, to get accurate counts. // // First, reset all explicit ref counts and weights. 
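    // Note: this does not clear lvImplicitlyReferenced, so implicitly referenced locals
    // keep a nonzero effective ref count across the reset.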
    for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
    {
        varDsc->setLvRefCnt(0);
        varDsc->setLvRefCntWtd(BB_ZERO_WEIGHT);

        if (setSlotNumbers)
        {
            varDsc->lvSlotNum = lclNum;
        }

        // Set initial value for lvSingleDef for explicit and implicit
        // argument locals as they are "defined" on entry.
        // However, if we are just recomputing the ref counts, retain the value
        // that was set by past phases.
        if (!isRecompute)
        {
            varDsc->lvSingleDef             = varDsc->lvIsParam;
            varDsc->lvSingleDefRegCandidate = varDsc->lvIsParam;
        }
    }

    // Remember current state of generic context use, and prepare
    // to compute new state.
    const bool oldLvaGenericsContextInUse = lvaGenericsContextInUse;
    lvaGenericsContextInUse               = false;

    JITDUMP("\n*** lvaComputeRefCounts -- explicit counts ***\n");

    // Second, account for all explicit local variable references
    for (BasicBlock* const block : Blocks())
    {
        if (block->IsLIR())
        {
            assert(isRecompute);

            const weight_t weight = block->getBBWeight(this);
            for (GenTree* node : LIR::AsRange(block))
            {
                switch (node->OperGet())
                {
                    case GT_LCL_VAR:
                    case GT_LCL_FLD:
                    case GT_LCL_VAR_ADDR:
                    case GT_LCL_FLD_ADDR:
                    case GT_STORE_LCL_VAR:
                    case GT_STORE_LCL_FLD:
                    {
                        LclVarDsc* varDsc = lvaGetDesc(node->AsLclVarCommon());
                        // If this is an EH var, use a zero weight for defs, so that we don't
                        // count those in our heuristic for register allocation, since they always
                        // must be stored, so there's no value in enregistering them at defs; only
                        // if there are enough uses to justify it.
                        if (varDsc->lvLiveInOutOfHndlr && !varDsc->lvDoNotEnregister &&
                            ((node->gtFlags & GTF_VAR_DEF) != 0))
                        {
                            varDsc->incRefCnts(0, this);
                        }
                        else
                        {
                            varDsc->incRefCnts(weight, this);
                        }
                        if ((node->gtFlags & GTF_VAR_CONTEXT) != 0)
                        {
                            assert(node->OperIs(GT_LCL_VAR));
                            lvaGenericsContextInUse = true;
                        }
                        break;
                    }
                    default:
                        break;
                }
            }
        }
        else
        {
            lvaMarkLocalVars(block, isRecompute);
        }
    }

    if (oldLvaGenericsContextInUse && !lvaGenericsContextInUse)
    {
        // Context was in use but no longer is. This can happen
        // if we're able to optimize, so just leave a note.
        JITDUMP("\n** Generics context no longer in use\n");
    }
    else if (lvaGenericsContextInUse && !oldLvaGenericsContextInUse)
    {
        // Context was not in use but now is.
        //
        // Changing from unused->used should never happen; creation of any new IR
        // for context use should also be setting lvaGenericsContextInUse.
        assert(!"unexpected new use of generics context");
    }

    JITDUMP("\n*** lvaComputeRefCounts -- implicit counts ***\n");

    // Third, bump ref counts for some implicit prolog references
    for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++)
    {
        // Todo: review justification for these count bumps.
        if (varDsc->lvIsRegArg)
        {
            if ((lclNum < info.compArgsCount) && (varDsc->lvRefCnt() > 0))
            {
                // Fix 388376 ARM JitStress WP7
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
            }

            // Ref count bump that was in lvaPromoteStructVar
            //
            // This was formerly done during RCS_EARLY counting,
            // and counts were not reset back then the way they are now.
            if (varDsc->lvIsStructField)
            {
                varDsc->incRefCnts(BB_UNITY_WEIGHT, this);
            }
        }

        // If we have JMP, all arguments must have a location
        // even if we don't use them inside the method
        if (compJmpOpUsed && varDsc->lvIsParam && (varDsc->lvRefCnt() == 0))
        {
            // except when we have varargs and the argument is
            // passed on the stack. In that case, it's important
            // for the ref count to be zero, so that we don't attempt
            // to track them for GC info (which is not possible since we
            // don't know their offset in the stack).
            // See the assert at the end of raMarkStkVars and bug #28949 for more info.
            if (!raIsVarargsStackArg(lclNum))
            {
                varDsc->lvImplicitlyReferenced = 1;
            }
        }
    }
}

void Compiler::lvaAllocOutgoingArgSpaceVar()
{
#if FEATURE_FIXED_OUT_ARGS

    // Setup the outgoing argument region, in case we end up using it later

    if (lvaOutgoingArgSpaceVar == BAD_VAR_NUM)
    {
        lvaOutgoingArgSpaceVar = lvaGrabTemp(false DEBUGARG("OutgoingArgSpace"));

        lvaTable[lvaOutgoingArgSpaceVar].lvType                 = TYP_LCLBLK;
        lvaTable[lvaOutgoingArgSpaceVar].lvImplicitlyReferenced = 1;
    }

    noway_assert(lvaOutgoingArgSpaceVar >= info.compLocalsCount && lvaOutgoingArgSpaceVar < lvaCount);

#endif // FEATURE_FIXED_OUT_ARGS
}

inline void Compiler::lvaIncrementFrameSize(unsigned size)
{
    if (size > MAX_FrameSize || compLclFrameSize + size > MAX_FrameSize)
    {
        BADCODE("Frame size overflow");
    }

    compLclFrameSize += size;
}

/****************************************************************************
 *
 *  Return true if absolute offsets of temps are larger than vars, or in other
 *  words, did we allocate temps before or after vars. The /GS buffer overrun
 *  checks want temps to be at lower stack addresses than buffers
 */
bool Compiler::lvaTempsHaveLargerOffsetThanVars()
{
#ifdef TARGET_ARM
    // We never want to place the temps with larger offsets for ARM
    return false;
#else
    if (compGSReorderStackLayout)
    {
        return codeGen->isFramePointerUsed();
    }
    else
    {
        return true;
    }
#endif
}

/****************************************************************************
 *
 *  Return an upper bound estimate for the size of the compiler spill temps
 *
 */
unsigned Compiler::lvaGetMaxSpillTempSize()
{
    unsigned result = 0;

    if (codeGen->regSet.hasComputedTmpSize())
    {
        result = codeGen->regSet.tmpGetTotalSize();
    }
    else
    {
        result = MAX_SPILL_TEMP_SIZE;
    }
    return result;
}

// clang-format off
/*****************************************************************************
 *
 *  Compute stack frame offsets for arguments, locals and optionally temps.
* * The frame is laid out as follows for x86: * * ESP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * |Callee saved registers | * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| <---- Ambient ESP * | Arguments for the | * ~ next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * EBP frames * * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| <---- Virtual '0' * | return address | * +=======================+ * | incoming EBP | * |-----------------------| <---- EBP * |Callee saved registers | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | Last-executed-filter | * |-----------------------| * | | * ~ Shadow SPs ~ * | | * |-----------------------| * | | * ~ Variables ~ * | | * ~-----------------------| * | Temps | * |-----------------------| * | localloc | * |-----------------------| <---- Ambient ESP * | Arguments for the | * | next function ~ * | | * | | | * | | Stack grows | * | downward * V * * * The frame is laid out as follows for x64: * * RSP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | <---- this padding (0 or 8 bytes) is to ensure flt registers are saved at a mem location aligned at 16-bytes * | | so that we can save 128-bit callee saved xmm regs using performant "movaps" instruction instead of "movups" * ------------------------- * | Callee saved Flt regs | <----- entire 128-bits of callee saved xmm registers are stored here * |-----------------------| * | Temps | * |-----------------------| * | Variables | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP * | | | * ~ | Stack grows ~ * | | downward | * V * * * RBP frames * | | * |-----------------------| * | incoming | * | arguments | * |-----------------------| * | 4 fixed incoming | * | argument slots | * |-----------------------| <---- Caller's SP & Virtual '0' * | return address | * +=======================+ * | Callee saved Int regs | * ------------------------- * | Padding | * ------------------------- * | Callee saved Flt regs | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | | * | | * ~ Variables ~ * | | * | | * |-----------------------| * | Temps | * |-----------------------| * | | * ~ localloc ~ // not in frames with EH * | | * |-----------------------| * | PSPSym | // only in frames with EH (thus no localloc) * | | * |-----------------------| <---- RBP in localloc frames (max 240 bytes from Initial-SP) * | Arguments for the | * ~ next function ~ * | | * |-----------------------| * | 4 fixed outgoing | * | argument slots | * |-----------------------| <---- Ambient RSP (before localloc, this is Initial-SP) * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM (this is a general picture; details may differ for different conditions): * * SP frames * | | * 
|-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP / R11 frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | Pre-spill registers | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which means FP-based frames * |-----------------------| * ~ possible double align ~ * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ possible double align ~ * |-----------------------| * | localloc | * |-----------------------| * | Arguments for the | * ~ next function ~ * | | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * The frame is laid out as follows for ARM64 (this is a general picture; details may differ for different conditions): * NOTE: SP must be 16-byte aligned, so there may be alignment slots in the frame. * We will often save and establish a frame pointer to create better ETW stack walks. 
* * SP frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * |Callee saved registers | * | except fp/lr | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * FP (R29 / x29) frames where FP/LR are stored at the top of the frame (frames requiring GS that have localloc) * | | * |-----------------------| * | incoming | * | arguments | * +=======================+ <---- Caller's SP * | optional homed | // this is only needed if reg argument need to be homed, e.g., for varargs * | register arguments | * |-----------------------| <---- Virtual '0' * | Saved LR | * |-----------------------| * | Saved FP | <---- Frame pointer * |-----------------------| * |Callee saved registers | * |-----------------------| * | PSPSym | // Only for frames with EH, which requires FP-based frames * |-----------------------| * | security object | * |-----------------------| * | ParamTypeArg | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Variables | * |-----------------------| * | possible GS cookie | * |-----------------------| * | Temps | * |-----------------------| * | Stub Argument Var | * |-----------------------| * |Inlined PInvoke Frame V| * |-----------------------| * ~ localloc ~ * |-----------------------| * | Stack arguments for | * | the next function | * |-----------------------| <---- Ambient SP * | | | * ~ | Stack grows ~ * | | downward | * V * * * Doing this all in one pass is 'hard'. So instead we do it in 2 basic passes: * 1. Assign all the offsets relative to the Virtual '0'. 
Offsets above (the
 *     incoming arguments) are positive. Offsets below (everything else) are
 *     negative. This pass also calculates the total frame size (between Caller's
 *     SP/return address and the Ambient SP).
 *  2. Figure out where to place the frame pointer, and then adjust the offsets
 *     as needed for the final stack size and whether the offset is frame pointer
 *     relative or stack pointer relative.
 *
 */
// clang-format on

void Compiler::lvaAssignFrameOffsets(FrameLayoutState curState)
{
    noway_assert((lvaDoneFrameLayout < curState) || (curState == REGALLOC_FRAME_LAYOUT));

    lvaDoneFrameLayout = curState;

#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In lvaAssignFrameOffsets");
        if (curState == INITIAL_FRAME_LAYOUT)
        {
            printf("(INITIAL_FRAME_LAYOUT)");
        }
        else if (curState == PRE_REGALLOC_FRAME_LAYOUT)
        {
            printf("(PRE_REGALLOC_FRAME_LAYOUT)");
        }
        else if (curState == REGALLOC_FRAME_LAYOUT)
        {
            printf("(REGALLOC_FRAME_LAYOUT)");
        }
        else if (curState == TENTATIVE_FRAME_LAYOUT)
        {
            printf("(TENTATIVE_FRAME_LAYOUT)");
        }
        else if (curState == FINAL_FRAME_LAYOUT)
        {
            printf("(FINAL_FRAME_LAYOUT)");
        }
        else
        {
            printf("(UNKNOWN)");
            unreached();
        }
        printf("\n");
    }
#endif

#if FEATURE_FIXED_OUT_ARGS
    assert(lvaOutgoingArgSpaceVar != BAD_VAR_NUM);
#endif // FEATURE_FIXED_OUT_ARGS

    /*-------------------------------------------------------------------------
     *
     * First process the arguments.
     *
     *-------------------------------------------------------------------------
     */

    lvaAssignVirtualFrameOffsetsToArgs();

    /*-------------------------------------------------------------------------
     *
     * Now compute stack offsets for any variables that don't live in registers
     *
     *-------------------------------------------------------------------------
     */

    lvaAssignVirtualFrameOffsetsToLocals();

    lvaAlignFrame();

    /*-------------------------------------------------------------------------
     *
     * Now patch the offsets
     *
     *-------------------------------------------------------------------------
     */

    lvaFixVirtualFrameOffsets();

    // Modify the stack offset for fields of promoted structs.
    lvaAssignFrameOffsetsToPromotedStructs();

    /*-------------------------------------------------------------------------
     *
     * Finalize
     *
     *-------------------------------------------------------------------------
     */

    // If it's not the final frame layout, then it's just an estimate. This means
    // we're allowed to once again write to these variables, even if we've read
    // from them to make tentative code generation or frame layout decisions.
    if (curState < FINAL_FRAME_LAYOUT)
    {
        codeGen->resetFramePointerUsedWritePhase();
    }
}

/*****************************************************************************
 *  lvaFixVirtualFrameOffsets() : Now that everything has a virtual offset,
 *  determine the final value for the frame pointer (if needed) and then
 *  adjust all the offsets appropriately.
 *
 *  This routine fixes virtual offset to be relative to frame pointer or SP
 *  based on whether varDsc->lvFramePointerBased is true or false respectively.
 */
void Compiler::lvaFixVirtualFrameOffsets()
{
    LclVarDsc* varDsc;

#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64)
    if (lvaPSPSym != BAD_VAR_NUM)
    {
        // We need to fix the offset of the PSPSym so there is no padding between it and the outgoing argument space.
        // Without this code, lvaAlignFrame might have put the padding lower than the PSPSym, which would be between
        // the PSPSym and the outgoing argument space.
        varDsc = lvaGetDesc(lvaPSPSym);
        assert(varDsc->lvFramePointerBased); // We always access it RBP-relative.
assert(!varDsc->lvMustInit); // It is never "must init". varDsc->SetStackOffset(codeGen->genCallerSPtoInitialSPdelta() + lvaLclSize(lvaOutgoingArgSpaceVar)); if (opts.IsOSR()) { // With OSR RBP points at the base of the OSR frame, but the virtual offsets // are from the base of the Tier0 frame. Adjust. // varDsc->SetStackOffset(varDsc->GetStackOffset() - info.compPatchpointInfo->TotalFrameSize()); } } #endif // The delta to be added to virtual offset to adjust it relative to frame pointer or SP int delta = 0; #ifdef TARGET_XARCH delta += REGSIZE_BYTES; // pushed PC (return address) for x86/x64 JITDUMP("--- delta bump %d for RA\n", REGSIZE_BYTES); if (codeGen->doubleAlignOrFramePointerUsed()) { JITDUMP("--- delta bump %d for FP\n", REGSIZE_BYTES); delta += REGSIZE_BYTES; // pushed EBP (frame pointer) } #endif if (!codeGen->isFramePointerUsed()) { // pushed registers, return address, and padding JITDUMP("--- delta bump %d for RSP frame\n", codeGen->genTotalFrameSize()); delta += codeGen->genTotalFrameSize(); } #if defined(TARGET_ARM) else { // We set FP to be after LR, FP delta += 2 * REGSIZE_BYTES; } #elif defined(TARGET_AMD64) || defined(TARGET_ARM64) else { // FP is used. JITDUMP("--- delta bump %d for FP frame\n", codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta()); delta += codeGen->genTotalFrameSize() - codeGen->genSPtoFPdelta(); } #endif // TARGET_AMD64 if (opts.IsOSR()) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Stack offset includes Tier0 frame. // JITDUMP("--- delta bump %d for OSR + Tier0 frame\n", info.compPatchpointInfo->TotalFrameSize()); delta += info.compPatchpointInfo->TotalFrameSize(); #endif } JITDUMP("--- virtual stack offset to actual stack offset delta is %d\n", delta); unsigned lclNum; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { bool doAssignStkOffs = true; // Can't be relative to EBP unless we have an EBP noway_assert(!varDsc->lvFramePointerBased || codeGen->doubleAlignOrFramePointerUsed()); // Is this a non-param promoted struct field? // if so then set doAssignStkOffs to false. // if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); #if defined(TARGET_X86) // On x86, we set the stack offset for a promoted field // to match a struct parameter in lvAssignFrameOffsetsToPromotedStructs. 
            if ((!varDsc->lvIsParam || parentvarDsc->lvIsParam) && promotionType == PROMOTION_TYPE_DEPENDENT)
#else
            if (!varDsc->lvIsParam && promotionType == PROMOTION_TYPE_DEPENDENT)
#endif
            {
                doAssignStkOffs = false; // Assigned later in lvaAssignFrameOffsetsToPromotedStructs()
            }
        }

        if (!varDsc->lvOnFrame)
        {
            if (!varDsc->lvIsParam
#if !defined(TARGET_AMD64)
                || (varDsc->lvIsRegArg
#if defined(TARGET_ARM) && defined(PROFILING_SUPPORTED)
                    && compIsProfilerHookNeeded() &&
                    !lvaIsPreSpilled(lclNum, codeGen->regSet.rsMaskPreSpillRegs(false)) // We need to assign stack
                                                                                        // offsets for prespilled
                                                                                        // arguments
#endif
                    )
#endif // !defined(TARGET_AMD64)
                    )
            {
                doAssignStkOffs = false; // Not on frame or an incoming stack arg
            }
        }

        if (doAssignStkOffs)
        {
            JITDUMP("-- V%02u was %d, now %d\n", lclNum, varDsc->GetStackOffset(), varDsc->GetStackOffset() + delta);
            varDsc->SetStackOffset(varDsc->GetStackOffset() + delta);

#if DOUBLE_ALIGN
            if (genDoubleAlign() && !codeGen->isFramePointerUsed())
            {
                if (varDsc->lvFramePointerBased)
                {
                    varDsc->SetStackOffset(varDsc->GetStackOffset() - delta);

                    // We need to re-adjust the offsets of the parameters so they are EBP
                    // relative rather than stack/frame pointer relative

                    varDsc->SetStackOffset(varDsc->GetStackOffset() + (2 * TARGET_POINTER_SIZE)); // return address and
                                                                                                  // pushed EBP

                    noway_assert(varDsc->GetStackOffset() >= FIRST_ARG_STACK_OFFS);
                }
            }
#endif
            // On System V environments the stkOffs could be 0 for params passed in registers.
            //
            // For normal methods only EBP relative references can have negative offsets.
            assert(codeGen->isFramePointerUsed() || varDsc->GetStackOffset() >= 0);
        }
    }

    assert(codeGen->regSet.tmpAllFree());
    for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp))
    {
        temp->tdAdjustTempOffs(delta);
    }

    lvaCachedGenericContextArgOffs += delta;

#if FEATURE_FIXED_OUT_ARGS

    if (lvaOutgoingArgSpaceVar != BAD_VAR_NUM)
    {
        varDsc = lvaGetDesc(lvaOutgoingArgSpaceVar);
        varDsc->SetStackOffset(0);
        varDsc->lvFramePointerBased = false;
        varDsc->lvMustInit          = false;
    }

#endif // FEATURE_FIXED_OUT_ARGS

#ifdef TARGET_ARM64
    // We normally add alignment below the locals between them and the outgoing
    // arg space area. When we store fp/lr at the bottom, however, this will be
    // below the alignment. So we should not apply the alignment adjustment to
    // them. On ARM64 it turns out we always store these at +0 and +8 of the FP,
    // so instead of dealing with skipping adjustment just for them we just set
    // them here always.
    assert(codeGen->isFramePointerUsed());
    if (lvaRetAddrVar != BAD_VAR_NUM)
    {
        lvaTable[lvaRetAddrVar].SetStackOffset(REGSIZE_BYTES);
    }
#endif
}

#ifdef TARGET_ARM
bool Compiler::lvaIsPreSpilled(unsigned lclNum, regMaskTP preSpillMask)
{
    const LclVarDsc& desc = lvaTable[lclNum];
    return desc.lvIsRegArg && (preSpillMask & genRegMask(desc.GetArgReg()));
}
#endif // TARGET_ARM

//------------------------------------------------------------------------
// lvaUpdateArgWithInitialReg: Set the initial register of a local variable
//     to the one assigned by the register allocator.
//
// Arguments:
//    varDsc - the local variable descriptor
//
void Compiler::lvaUpdateArgWithInitialReg(LclVarDsc* varDsc)
{
    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegCandidate())
    {
        varDsc->SetRegNum(varDsc->GetArgInitReg());
    }
}

//------------------------------------------------------------------------
// lvaUpdateArgsWithInitialReg() : For each argument variable descriptor, update
//     its current register with the initial register as assigned by LSRA.
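//     (No-op until LSRA has run, i.e. while compLSRADone is false.)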
// void Compiler::lvaUpdateArgsWithInitialReg() { if (!compLSRADone) { return; } for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromotedStruct()) { for (unsigned fieldVarNum = varDsc->lvFieldLclStart; fieldVarNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldVarNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldVarNum); lvaUpdateArgWithInitialReg(fieldVarDsc); } } else { lvaUpdateArgWithInitialReg(varDsc); } } } /***************************************************************************** * lvaAssignVirtualFrameOffsetsToArgs() : Assign virtual stack offsets to the * arguments, and implicit arguments (this ptr, return buffer, generics, * and varargs). */ void Compiler::lvaAssignVirtualFrameOffsetsToArgs() { unsigned lclNum = 0; int argOffs = 0; #ifdef UNIX_AMD64_ABI int callerArgOffset = 0; #endif // UNIX_AMD64_ABI /* Assign stack offsets to arguments (in reverse order of passing). This means that if we pass arguments left->right, we start at the end of the list and work backwards, for right->left we start with the first argument and move forward. This is all relative to our Virtual '0' */ if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs = compArgSize; } /* Update the argOffs to reflect arguments that are passed in registers */ noway_assert(codeGen->intRegState.rsCalleeRegArgCount <= MAX_REG_ARG); noway_assert(compMacOsArm64Abi() || compArgSize >= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES); if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs -= codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES; } // Update the arg initial register locations. lvaUpdateArgsWithInitialReg(); /* Is there a "this" argument? */ if (!info.compIsStatic) { noway_assert(lclNum == info.compThisArg); #ifndef TARGET_X86 argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); #endif // TARGET_X86 lclNum++; } unsigned userArgsToSkip = 0; #if !defined(TARGET_ARM) // In the native instance method calling convention on Windows, // the this parameter comes before the hidden return buffer parameter. // So, we want to process the native "this" parameter before we process // the native return buffer parameter. 
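    // Such a parameter is also counted in userArgsToSkip so that the signature walk
    // below does not assign it an offset twice.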
if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { #ifdef TARGET_X86 if (!lvaTable[lclNum].lvIsRegArg) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); } #elif !defined(UNIX_AMD64_ABI) argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs); #endif // TARGET_X86 lclNum++; userArgsToSkip++; } #endif /* if we have a hidden buffer parameter, that comes here */ if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(lclNum == info.compRetBuffArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); lclNum++; } #if USER_ARGS_COME_LAST //@GENERICS: extra argument for instantiation info if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) { noway_assert(lclNum == (unsigned)info.compTypeCtxtArg); argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } if (info.compIsVarArgs) { argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset)); } #endif // USER_ARGS_COME_LAST CORINFO_ARG_LIST_HANDLE argLst = info.compMethodInfo->args.args; unsigned argSigLen = info.compMethodInfo->args.numArgs; // Skip any user args that we've already processed. assert(userArgsToSkip <= argSigLen); argSigLen -= userArgsToSkip; for (unsigned i = 0; i < userArgsToSkip; i++, argLst = info.compCompHnd->getArgNext(argLst)) { ; } #ifdef TARGET_ARM // // struct_n { int; int; ... n times }; // // Consider signature: // // Foo (float a,double b,float c,double d,float e,double f,float g,double h, // float i,double j,float k,double l,struct_3 m) { } // // Basically the signature is: (all float regs full, 1 double, struct_3); // // The double argument occurs before pre spill in the argument iteration and // computes an argOffset of 0. struct_3 offset becomes 8. This is wrong. // Because struct_3 is prespilled and double occurs after prespill. // The correct offsets are double = 16 (aligned stk), struct_3 = 0..12, // Offset 12 will be skipped for double alignment of double. // // Another example is (struct_2, all float regs full, double, struct_2); // Here, notice the order is similarly messed up because of 2 pre-spilled // struct_2. // // Succinctly, // ARG_INDEX(i) > ARG_INDEX(j) DOES NOT IMPLY |ARG_OFFSET(i)| > |ARG_OFFSET(j)| // // Therefore, we'll do a two pass offset calculation, one that considers pre-spill // and the next, stack args. // unsigned argLcls = 0; // Take care of pre spill registers first. regMaskTP preSpillMask = codeGen->regSet.rsMaskPreSpillRegs(false); regMaskTP tempMask = RBM_NONE; for (unsigned i = 0, preSpillLclNum = lclNum; i < argSigLen; ++i, ++preSpillLclNum) { if (lvaIsPreSpilled(preSpillLclNum, preSpillMask)) { unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args); argOffs = lvaAssignVirtualFrameOffsetToArg(preSpillLclNum, argSize, argOffs); argLcls++; // Early out if we can. If size is 8 and base reg is 2, then the mask is 0x1100 tempMask |= ((((1 << (roundUp(argSize, TARGET_POINTER_SIZE) / REGSIZE_BYTES))) - 1) << lvaTable[preSpillLclNum].GetArgReg()); if (tempMask == preSpillMask) { // We won't encounter more pre-spilled registers, // so don't bother iterating further. break; } } argLst = info.compCompHnd->getArgNext(argLst); } // Take care of non pre-spilled stack arguments. 
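    // Second pass: walk the signature again and assign offsets to the stack-passed
    // (non-prespilled) arguments.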
    argLst = info.compMethodInfo->args.args;
    for (unsigned i = 0, stkLclNum = lclNum; i < argSigLen; ++i, ++stkLclNum)
    {
        if (!lvaIsPreSpilled(stkLclNum, preSpillMask))
        {
            const unsigned argSize = eeGetArgSize(argLst, &info.compMethodInfo->args);
            argOffs                = lvaAssignVirtualFrameOffsetToArg(stkLclNum, argSize, argOffs);
            argLcls++;
        }
        argLst = info.compCompHnd->getArgNext(argLst);
    }

    lclNum += argLcls;
#else // !TARGET_ARM
    for (unsigned i = 0; i < argSigLen; i++)
    {
        unsigned argumentSize = eeGetArgSize(argLst, &info.compMethodInfo->args);

        assert(compMacOsArm64Abi() || argumentSize % TARGET_POINTER_SIZE == 0);

        argOffs =
            lvaAssignVirtualFrameOffsetToArg(lclNum++, argumentSize, argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
        argLst = info.compCompHnd->getArgNext(argLst);
    }
#endif // !TARGET_ARM

#if !USER_ARGS_COME_LAST

    //@GENERICS: extra argument for instantiation info
    if (info.compMethodInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE)
    {
        noway_assert(lclNum == (unsigned)info.compTypeCtxtArg);
        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                   argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
    }

    if (info.compIsVarArgs)
    {
        argOffs = lvaAssignVirtualFrameOffsetToArg(lclNum++, REGSIZE_BYTES,
                                                   argOffs UNIX_AMD64_ABI_ONLY_ARG(&callerArgOffset));
    }

#endif // USER_ARGS_COME_LAST
}

#ifdef UNIX_AMD64_ABI
//
//  lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an
//  individual argument, and return the offset for the next argument.
//  Note: This method only calculates the initial offset of the stack passed/spilled arguments
//  (if any - the RA might decide to spill (home on the stack) register passed arguments, if rarely used.)
//  The final offset is calculated in the lvaFixVirtualFrameOffsets method. It accounts for FP existence,
//  ret address slot, stack frame padding, alloca instructions, etc.
//  Note: This is the implementation for UNIX_AMD64 System V platforms.
//
int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum,
                                               unsigned argSize,
                                               int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset))
{
    noway_assert(lclNum < info.compArgsCount);
    noway_assert(argSize);

    if (info.compArgOrder == Target::ARG_ORDER_L2R)
    {
        argOffs -= argSize;
    }

    unsigned fieldVarNum = BAD_VAR_NUM;

    LclVarDsc* varDsc = lvaGetDesc(lclNum);

    noway_assert(varDsc->lvIsParam);

    if (varDsc->lvIsRegArg)
    {
        // Argument is passed in a register, don't count it
        // when updating the current offset on the stack.

        if (varDsc->lvOnFrame)
        {
            // The offset for args needs to be set only for the stack homed arguments for System V.
            varDsc->SetStackOffset(argOffs);
        }
        else
        {
            varDsc->SetStackOffset(0);
        }
    }
    else
    {
        // For Windows AMD64 there are 4 slots for the register passed arguments on the top of the caller's stack.
        // This is where they are always homed. So, they can be accessed with positive offset.
        // On System V platforms, if the RA decides to home a register passed arg on the stack, it creates a stack
        // location on the callee stack (like any other local var.) In such a case, the register passed, stack homed
        // arguments are accessed using negative offsets and the stack passed arguments are accessed using positive
        // offset (from the caller's stack.)
        // For System V platforms if there is no frame pointer the caller stack parameter offset should include the
        // callee allocated space. If frame register is used, the callee allocated space should not be included for
        // accessing the caller stack parameters.
The last two requirements are met in lvaFixVirtualFrameOffsets // method, which fixes the offsets, based on frame pointer existence, existence of alloca instructions, ret // address pushed, etc. varDsc->SetStackOffset(*callerArgOffset); // Structs passed on stack could be of size less than TARGET_POINTER_SIZE. // Make sure they get at least TARGET_POINTER_SIZE on the stack - this is required for alignment. if (argSize > TARGET_POINTER_SIZE) { *callerArgOffset += (int)roundUp(argSize, TARGET_POINTER_SIZE); } else { *callerArgOffset += TARGET_POINTER_SIZE; } } // For struct promoted parameters we need to set the offsets for the field lclVars. // // For a promoted struct we also assign the struct fields stack offset if (varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; int offset = varDsc->GetStackOffset(); for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); fieldVarDsc->SetStackOffset(offset + fieldVarDsc->lvFldOffset); } } if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg) { argOffs += argSize; } return argOffs; } #else // !UNIX_AMD64_ABI // // lvaAssignVirtualFrameOffsetToArg() : Assign virtual stack offsets to an // individual argument, and return the offset for the next argument. // Note: This method only calculates the initial offset of the stack passed/spilled arguments // (if any - the RA might decide to spill (home on the stack) register passed arguments, if rarely used.) // The final offset is calculated in lvaFixVirtualFrameOffsets method. It accounts for FP existence, // ret address slot, stack frame padding, alloca instructions, etc. // Note: This implementation for all the platforms but UNIX_AMD64 OSs (System V 64 bit.) int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, unsigned argSize, int argOffs UNIX_AMD64_ABI_ONLY_ARG(int* callerArgOffset)) { noway_assert(lclNum < info.compArgsCount); noway_assert(argSize); if (info.compArgOrder == Target::ARG_ORDER_L2R) { argOffs -= argSize; } unsigned fieldVarNum = BAD_VAR_NUM; LclVarDsc* varDsc = lvaGetDesc(lclNum); noway_assert(varDsc->lvIsParam); if (varDsc->lvIsRegArg) { /* Argument is passed in a register, don't count it * when updating the current offset on the stack */ CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARMARCH) #if DEBUG // TODO: Remove this noway_assert and replace occurrences of TARGET_POINTER_SIZE with argSize // Also investigate why we are incrementing argOffs for X86 as this seems incorrect // noway_assert(argSize == TARGET_POINTER_SIZE); #endif // DEBUG #endif #if defined(TARGET_X86) argOffs += TARGET_POINTER_SIZE; #elif defined(TARGET_AMD64) // Register arguments on AMD64 also take stack space. (in the backing store) varDsc->SetStackOffset(argOffs); argOffs += TARGET_POINTER_SIZE; #elif defined(TARGET_ARM64) // Register arguments on ARM64 only take stack space when they have a frame home. // Unless on windows and in a vararg method. if (compFeatureArgSplit() && this->info.compIsVarArgs) { if (varDsc->lvType == TYP_STRUCT && varDsc->GetOtherArgReg() >= MAX_REG_ARG && varDsc->GetOtherArgReg() != REG_NA) { // This is a split struct. It will account for an extra (8 bytes) // of alignment.
varDsc->SetStackOffset(varDsc->GetStackOffset() + TARGET_POINTER_SIZE); argOffs += TARGET_POINTER_SIZE; } } #elif defined(TARGET_ARM) // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, so we have to do SetStackOffset() here // regMaskTP regMask = genRegMask(varDsc->GetArgReg()); if (codeGen->regSet.rsMaskPreSpillRegArg & regMask) { // Signature: void foo(struct_8, int, struct_4) // ------- CALLER SP ------- // r3 struct_4 // r2 int - not prespilled, but added for alignment. argOffs should skip this. // r1 struct_8 // r0 struct_8 // ------------------------- // If we added alignment we need to fix argOffs for all registers above alignment. if (codeGen->regSet.rsMaskPreSpillAlign != RBM_NONE) { assert(genCountBits(codeGen->regSet.rsMaskPreSpillAlign) == 1); // Is register beyond the alignment pos? if (regMask > codeGen->regSet.rsMaskPreSpillAlign) { // Increment argOffs just once for the _first_ register after alignment pos // in the prespill mask. if (!BitsBetween(codeGen->regSet.rsMaskPreSpillRegArg, regMask, codeGen->regSet.rsMaskPreSpillAlign)) { argOffs += TARGET_POINTER_SIZE; } } } switch (varDsc->lvType) { case TYP_STRUCT: if (!varDsc->lvStructDoubleAlign) { break; } FALLTHROUGH; case TYP_DOUBLE: case TYP_LONG: { // // Let's assign offsets to arg1, a double in r2. argOffs has to be 4 not 8. // // ------- CALLER SP ------- // r3 // r2 double -- argOffs = 4, but it doesn't need to be skipped, because there is no skipping. // r1 VACookie -- argOffs = 0 // ------------------------- // // Consider argOffs as if it accounts for number of prespilled registers before the current // register. In the above example, for r2, it is r1 that is prespilled, but since r1 is // accounted for by argOffs being 4, there should have been no skipping. Instead, if we didn't // assign r1 to any variable, then argOffs would still be 0 which implies it is not accounting // for r1, equivalently r1 is skipped. // // If prevRegsSize is unaccounted for by a corresponding argOffs, we must have skipped a register. int prevRegsSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegArg & (regMask - 1)) * TARGET_POINTER_SIZE; if (argOffs < prevRegsSize) { // We must align up the argOffset to a multiple of 8 to account for skipped registers. argOffs = roundUp((unsigned)argOffs, 2 * TARGET_POINTER_SIZE); } // We should've skipped only a single register. assert(argOffs == prevRegsSize); } break; default: // No alignment of argOffs required break; } varDsc->SetStackOffset(argOffs); argOffs += argSize; } #else // TARGET* #error Unsupported or unset target architecture #endif // TARGET* } else { #if defined(TARGET_ARM) // Dev11 Bug 42817: incorrect codegen for DrawFlatCheckBox causes A/V in WinForms // // Here we have method with a signature (int a1, struct a2, struct a3, int a4, int a5). // Struct parameter 'a2' is 16-bytes with no alignment requirements; // it uses r1,r2,r3 and [OutArg+0] when passed. // Struct parameter 'a3' is 16-bytes that is required to be double aligned; // the caller skips [OutArg+4] and starts the argument at [OutArg+8]. // Thus the caller generates the correct code to pass the arguments. // When generating code to receive the arguments we set codeGen->regSet.rsMaskPreSpillRegArg to [r1,r2,r3] // and spill these three registers as the first instruction in the prolog. // Then when we layout the arguments' stack offsets we have an argOffs 0 which // points at the location that we spilled r1 into the stack. 
For this first // struct we take the lvIsRegArg path above with "codeGen->regSet.rsMaskPreSpillRegArg &" matching. // Next when we calculate the argOffs for the second 16-byte struct we have an argOffs // of 16, which appears to be aligned properly so we don't skip a stack slot. // // To fix this we must recover the actual OutArg offset by subtracting off the // sizeof of the PreSpill register args. // Then we align this offset to a multiple of 8 and add back the sizeof // of the PreSpill register args. // // Dev11 Bug 71767: failure of assert(sizeofPreSpillRegArgs <= argOffs) // // We have a method with 'this' passed in r0, RetBuf arg in r1, VarArgs cookie // in r2. The first user arg is a 144 byte struct with double alignment required, // r3 is skipped, and the struct is passed on the stack. However, 'r3' is added // to the codeGen->regSet.rsMaskPreSpillRegArg mask by the VarArgs cookie code, since we need to // home all the potential varargs arguments in registers, even if we don't have // signature type information for the variadic arguments. However, due to alignment, // we have skipped a register that doesn't have a corresponding symbol. Make up // for that by increasing argOffs here. // int sizeofPreSpillRegArgs = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; if (argOffs < sizeofPreSpillRegArgs) { // This can only happen if we skipped the last register spot because current stk arg // is a struct requiring alignment or a pre-spill alignment was required because the // first reg arg needed alignment. // // Example 1: First Stk Argument requiring alignment in vararg case (same as above comment.) // Signature (int a0, int a1, int a2, struct {long} a3, ...) // // stk arg a3 --> argOffs here will be 12 (r0-r2) but pre-spill will be 16. // ---- Caller SP ---- // r3 --> Stack slot is skipped in this case. // r2 int a2 // r1 int a1 // r0 int a0 // // Example 2: First Reg Argument requiring alignment in no-vararg case. // Signature (struct {long} a0, struct {int} a1, int a2, int a3) // // stk arg --> argOffs here will be 12 {r0-r2} but pre-spill will be 16. // ---- Caller SP ---- // r3 int a2 --> pushed (not pre-spilled) for alignment of a0 by lvaInitUserArgs. // r2 struct { int } a1 // r0-r1 struct { long } a0 CLANG_FORMAT_COMMENT_ANCHOR; #ifdef PROFILING_SUPPORTED // On Arm under profiler, r0-r3 are always prespilled on stack. // It is possible to have methods that accept only HFAs as parameters e.g. Signature(struct hfa1, struct // hfa2), in which case hfa1 and hfa2 will be en-registered in co-processor registers and will have an // argument offset less than size of preSpill. // // For this reason the following conditions are asserted when not under profiler. if (!compIsProfilerHookNeeded()) #endif { bool cond = ((info.compIsVarArgs || opts.compUseSoftFP) && // Does cur stk arg require double alignment? ((varDsc->lvType == TYP_STRUCT && varDsc->lvStructDoubleAlign) || (varDsc->lvType == TYP_DOUBLE) || (varDsc->lvType == TYP_LONG))) || // Did first reg arg require alignment? 
(codeGen->regSet.rsMaskPreSpillAlign & genRegMask(REG_ARG_LAST)); noway_assert(cond); noway_assert(sizeofPreSpillRegArgs <= argOffs + TARGET_POINTER_SIZE); // at most one register of alignment } argOffs = sizeofPreSpillRegArgs; } noway_assert(argOffs >= sizeofPreSpillRegArgs); int argOffsWithoutPreSpillRegArgs = argOffs - sizeofPreSpillRegArgs; switch (varDsc->lvType) { case TYP_STRUCT: if (!varDsc->lvStructDoubleAlign) break; FALLTHROUGH; case TYP_DOUBLE: case TYP_LONG: // We must align up the argOffset to a multiple of 8 argOffs = roundUp((unsigned)argOffsWithoutPreSpillRegArgs, 2 * TARGET_POINTER_SIZE) + sizeofPreSpillRegArgs; break; default: // No alignment of argOffs required break; } #endif // TARGET_ARM const bool isFloatHfa = (varDsc->lvIsHfa() && (varDsc->GetHfaType() == TYP_FLOAT)); const unsigned argAlignment = eeGetArgSizeAlignment(varDsc->lvType, isFloatHfa); if (compMacOsArm64Abi()) { argOffs = roundUp(argOffs, argAlignment); } assert((argSize % argAlignment) == 0); assert((argOffs % argAlignment) == 0); varDsc->SetStackOffset(argOffs); } // For struct promoted parameters we need to set the offsets for both LclVars. // // For a dependent promoted struct we also assign the struct fields stack offset CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_64BIT) if ((varDsc->TypeGet() == TYP_LONG) && varDsc->lvPromoted) { noway_assert(varDsc->lvFieldCnt == 2); fieldVarNum = varDsc->lvFieldLclStart; lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset()); lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + genTypeSize(TYP_INT)); } else #endif // !defined(TARGET_64BIT) if (varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); JITDUMP("Adjusting offset of dependent V%02u of arg V%02u: parent %u field %u net %u\n", lclNum, firstFieldNum + i, varDsc->GetStackOffset(), fieldVarDsc->lvFldOffset, varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); } } if (info.compArgOrder == Target::ARG_ORDER_R2L && !varDsc->lvIsRegArg) { argOffs += argSize; } return argOffs; } #endif // !UNIX_AMD64_ABI //----------------------------------------------------------------------------- // lvaAssignVirtualFrameOffsetsToLocals: compute the virtual stack offsets for // all elements on the stackframe. // // Notes: // Can be called multiple times. Early calls can be used to estimate various // frame offsets, but details may change. // void Compiler::lvaAssignVirtualFrameOffsetsToLocals() { // (1) Account for things that are set up by the prolog and undone by the epilog. // int stkOffs = 0; int originalFrameStkOffs = 0; int originalFrameSize = 0; // codeGen->isFramePointerUsed is set in regalloc phase. Initialize it to a guess for pre-regalloc layout. if (lvaDoneFrameLayout <= PRE_REGALLOC_FRAME_LAYOUT) { codeGen->setFramePointerUsed(codeGen->isFramePointerRequired()); } #ifdef TARGET_ARM64 // Decide where to save FP and LR registers. We store FP/LR registers at the bottom of the frame if there is // a frame pointer used (so we get positive offsets from the frame pointer to access locals), but not if we // need a GS cookie AND localloc is used, since we need the GS cookie to protect the saved return value, // and also the saved frame pointer. See CodeGen::genPushCalleeSavedRegisters() for more details about the // frame types.
Since saving FP/LR at high addresses is a relatively rare case, force using it during stress. // (It should be legal to use these frame types for every frame). if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 0) { // Default configuration codeGen->SetSaveFpLrWithAllCalleeSavedRegisters((getNeedsGSSecurityCookie() && compLocallocUsed) || compStressCompile(STRESS_GENERIC_VARN, 20)); } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 1) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(false); // Disable using new frames } else if (opts.compJitSaveFpLrWithCalleeSavedRegisters == 2) { codeGen->SetSaveFpLrWithAllCalleeSavedRegisters(true); // Force using new frames } #endif // TARGET_ARM64 #ifdef TARGET_XARCH // On x86/amd64, the return address has already been pushed by the call instruction in the caller. stkOffs -= TARGET_POINTER_SIZE; // return address; if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs); } #endif // If we are an OSR method, we "inherit" the frame of the original method // if (opts.IsOSR()) { originalFrameSize = info.compPatchpointInfo->TotalFrameSize(); originalFrameStkOffs = stkOffs; stkOffs -= originalFrameSize; } #ifdef TARGET_XARCH // TODO-AMD64-CQ: for X64 eventually this should be pushed with all the other // calleeregs. When you fix this, you'll also need to fix // the assert at the bottom of this method if (codeGen->doubleAlignOrFramePointerUsed()) { stkOffs -= REGSIZE_BYTES; } #endif int preSpillSize = 0; bool mustDoubleAlign = false; #ifdef TARGET_ARM mustDoubleAlign = true; preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES; #else // !TARGET_ARM #if DOUBLE_ALIGN if (genDoubleAlign()) { mustDoubleAlign = true; // X86 only } #endif #endif // !TARGET_ARM #ifdef TARGET_ARM64 // If the frame pointer is used, then we'll save FP/LR at the bottom of the stack. // Otherwise, we won't store FP, and we'll store LR at the top, with the other callee-save // registers (if any). int initialStkOffs = 0; if (info.compIsVarArgs) { // For varargs we always save all of the integer register arguments // so that they are contiguous with the incoming stack arguments. initialStkOffs = MAX_REG_ARG * REGSIZE_BYTES; stkOffs -= initialStkOffs; } if (codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() || !isFramePointerUsed()) // Note that currently we always have a frame pointer { stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; } else { // Subtract off FP and LR. assert(compCalleeRegsPushed >= 2); stkOffs -= (compCalleeRegsPushed - 2) * REGSIZE_BYTES; } #else // !TARGET_ARM64 #ifdef TARGET_ARM // On ARM32 LR is part of the pushed registers and is always stored at the // top. if (lvaRetAddrVar != BAD_VAR_NUM) { lvaTable[lvaRetAddrVar].SetStackOffset(stkOffs - REGSIZE_BYTES); } #endif stkOffs -= compCalleeRegsPushed * REGSIZE_BYTES; #endif // !TARGET_ARM64 // (2) Account for the remainder of the frame // // From this point on the code must generally adjust both // stkOffs and the local frame size. The latter is done via: // // lvaIncrementFrameSize -- for space not associated with a local var // lvaAllocLocalAndSetVirtualOffset -- for space associated with a local var // // One exception to the above: OSR locals that have offsets within the Tier0 // portion of the frame. 
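// For illustration only (hypothetical numbers): if stkOffs is -16 at some point below, then
// lvaAllocLocalAndSetVirtualOffset(lclNum, 8, stkOffs) grows compLclFrameSize by 8 and returns -24,
// which becomes both the local's virtual offset and the new running stkOffs; a bare
// lvaIncrementFrameSize(8) only grows the frame size, and the caller must adjust stkOffs itself.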
// compLclFrameSize = 0; #ifdef TARGET_AMD64 // For methods with patchpoints, the Tier0 method must reserve // space for all the callee saves, as this area is shared with the // OSR method, and we have to anticipate that collectively the // Tier0 and OSR methods end up saving all callee saves. // // Currently this is x64 only. // if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints()) { const unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); const unsigned extraSlots = genCountBits(RBM_OSR_INT_CALLEE_SAVED) - regsPushed; const unsigned extraSlotSize = extraSlots * REGSIZE_BYTES; JITDUMP("\nMethod has patchpoints and has %u callee saves.\n" "Reserving %u extra slots (%u bytes) for potential OSR method callee saves\n", regsPushed, extraSlots, extraSlotSize); stkOffs -= extraSlotSize; lvaIncrementFrameSize(extraSlotSize); } // In case of Amd64 compCalleeRegsPushed does not include float regs (Xmm6-xmm15) that // need to be pushed. But Amd64 doesn't support push/pop of xmm registers. // Instead we need to allocate space for them on the stack and save them in prolog. // Therefore, we consider xmm registers being saved while computing stack offsets // but space for xmm registers is considered part of compLclFrameSize. // Notes // 1) We need to save the entire 128-bits of xmm register to stack, since amd64 // prolog unwind codes allow encoding of an instruction that stores the entire xmm reg // at an offset relative to SP // 2) We adjust frame size so that SP is aligned at 16-bytes after pushing integer registers. // This means while saving the first xmm register to its allocated stack location we might // have to skip 8-bytes. The reason for padding is to use efficient "movaps" to save/restore // xmm registers to/from stack to match Jit64 codegen. Without the aligning on 16-byte // boundary we would have to use movups when offset turns out unaligned. Movaps is more // performant than movups. const unsigned calleeFPRegsSavedSize = genCountBits(compCalleeFPRegsSavedMask) * XMM_REGSIZE_BYTES; // For OSR the alignment pad computation should not take the original frame into account. // Original frame size includes the pseudo-saved RA and so is always = 8 mod 16. const int offsetForAlign = -(stkOffs + originalFrameSize); if ((calleeFPRegsSavedSize > 0) && ((offsetForAlign % XMM_REGSIZE_BYTES) != 0)) { // Take care of alignment int alignPad = (int)AlignmentPad((unsigned)offsetForAlign, XMM_REGSIZE_BYTES); assert(alignPad != 0); stkOffs -= alignPad; lvaIncrementFrameSize(alignPad); } stkOffs -= calleeFPRegsSavedSize; lvaIncrementFrameSize(calleeFPRegsSavedSize); // Quirk for VS debug-launch scenario to work if (compVSQuirkStackPaddingNeeded > 0) { #ifdef DEBUG if (verbose) { printf("\nAdding VS quirk stack padding of %d bytes between save-reg area and locals\n", compVSQuirkStackPaddingNeeded); } #endif // DEBUG stkOffs -= compVSQuirkStackPaddingNeeded; lvaIncrementFrameSize(compVSQuirkStackPaddingNeeded); } #endif // TARGET_AMD64 #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARMARCH) if (lvaPSPSym != BAD_VAR_NUM) { // On ARM/ARM64, if we need a PSPSym, allocate it first, before anything else, including // padding (so we can avoid computing the same padding in the funclet // frame). Note that there is no special padding requirement for the PSPSym. 
noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(TARGET_ARMARCH) if (mustDoubleAlign) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // Allocate a pointer sized stack slot, since we may need to double align here // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs // then we need to allocate a second pointer sized stack slot, // since we may need to double align that LclVar when we see it // in the loop below. We will just always do this so that the // offsets that we calculate for the stack frame will always // be greater (or equal) to what they can be in the final layout. // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } else // FINAL_FRAME_LAYOUT { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } } if (lvaMonAcquired != BAD_VAR_NUM) { // For OSR we use the flag set up by the original method. // if (opts.IsOSR()) { assert(info.compPatchpointInfo->HasMonitorAcquired()); int originalOffset = info.compPatchpointInfo->MonitorAcquiredOffset(); int offset = originalFrameStkOffs + originalOffset; JITDUMP( "---OSR--- V%02u (on tier0 frame, monitor acquired) tier0 FP-rel offset %d tier0 frame offset %d new " "virt offset %d\n", lvaMonAcquired, originalOffset, originalFrameStkOffs, offset); lvaTable[lvaMonAcquired].SetStackOffset(offset); } else { // This var must go first, in what is called the 'frame header' for EnC so that it is // preserved when remapping occurs. See vm\eetwain.cpp for detailed comment specifying frame // layout requirements for EnC to work. stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaMonAcquired, lvaLclSize(lvaMonAcquired), stkOffs); } } #ifdef JIT32_GCENCODER if (lvaLocAllocSPvar != BAD_VAR_NUM) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaLocAllocSPvar, TARGET_POINTER_SIZE, stkOffs); } #endif // JIT32_GCENCODER // For OSR methods, param type args are always reportable via the root method frame slot. // (see gcInfoBlockHdrSave) and so do not need a new slot on the frame. // // OSR methods may also be able to use the root frame's kept-alive "this", if the root // method needed to report this. // // Inlining done under OSR may introduce new reporting, in which case the OSR frame // must allocate a slot.
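// For illustration only (hypothetical numbers): if the Tier0 method reported its generic context at
// Tier0 offset -24, and the Tier0 frame begins at originalFrameStkOffs == -8 in our virtual offsets,
// then the OSR path below caches -32 and reuses the existing Tier0 slot instead of allocating a new one.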
if (lvaReportParamTypeArg()) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; assert(ppInfo->HasGenericContextArgOffset()); const int originalOffset = ppInfo->GenericContextArgOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; } else { // For CORINFO_CALLCONV_PARAMTYPE (if needed) lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #ifndef JIT32_GCENCODER else if (lvaKeepAliveAndReportThis()) { bool canUseExistingSlot = false; if (opts.IsOSR()) { PatchpointInfo* ppInfo = info.compPatchpointInfo; if (ppInfo->HasKeptAliveThis()) { const int originalOffset = ppInfo->KeptAliveThisOffset(); lvaCachedGenericContextArgOffs = originalFrameStkOffs + originalOffset; canUseExistingSlot = true; } } if (!canUseExistingSlot) { // When "this" is also used as generic context arg. lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; lvaCachedGenericContextArgOffs = stkOffs; } } #endif #if !defined(FEATURE_EH_FUNCLETS) /* If we need space for slots for shadow SP, reserve it now */ if (ehNeedsShadowSPslots()) { noway_assert(codeGen->isFramePointerUsed()); // else offsets of locals of frameless methods will be incorrect if (!lvaReportParamTypeArg()) { #ifndef JIT32_GCENCODER if (!lvaKeepAliveAndReportThis()) #endif { // In order to keep the gc info encoding smaller, the VM assumes that all methods with EH // have also saved space for a ParamTypeArg, so we need to do that here lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } } stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaShadowSPslotsVar, lvaLclSize(lvaShadowSPslotsVar), stkOffs); } #endif // !FEATURE_EH_FUNCLETS if (compGSReorderStackLayout) { assert(getNeedsGSSecurityCookie()); if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie()) { stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs); } } /* If we're supposed to track lifetimes of pointer temps, we'll assign frame offsets in the following order: non-pointer local variables (also untracked pointer variables) pointer local variables pointer temps non-pointer temps */ enum Allocation { ALLOC_NON_PTRS = 0x1, // assign offsets to non-ptr ALLOC_PTRS = 0x2, // Second pass, assign offsets to tracked ptrs ALLOC_UNSAFE_BUFFERS = 0x4, ALLOC_UNSAFE_BUFFERS_WITH_PTRS = 0x8 }; UINT alloc_order[5]; unsigned int cur = 0; if (compGSReorderStackLayout) { noway_assert(getNeedsGSSecurityCookie()); if (codeGen->isFramePointerUsed()) { alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS; alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS; } } bool tempsAllocated = false; if (lvaTempsHaveLargerOffsetThanVars() && !codeGen->isFramePointerUsed()) { // Because we want the temps to have a larger offset than locals // and we're not using a frame pointer, we have to place the temps // above the vars. Otherwise we place them after the vars (at the // bottom of the frame). 
noway_assert(!tempsAllocated); stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign); tempsAllocated = true; } alloc_order[cur++] = ALLOC_NON_PTRS; if (opts.compDbgEnC) { /* We will use just one pass, and assign offsets to all variables */ alloc_order[cur - 1] |= ALLOC_PTRS; noway_assert(compGSReorderStackLayout == false); } else { alloc_order[cur++] = ALLOC_PTRS; } if (!codeGen->isFramePointerUsed() && compGSReorderStackLayout) { alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS_WITH_PTRS; alloc_order[cur++] = ALLOC_UNSAFE_BUFFERS; } alloc_order[cur] = 0; noway_assert(cur < ArrLen(alloc_order)); // Force first pass to happen UINT assignMore = 0xFFFFFFFF; bool have_LclVarDoubleAlign = false; for (cur = 0; alloc_order[cur]; cur++) { if ((assignMore & alloc_order[cur]) == 0) { continue; } assignMore = 0; unsigned lclNum; LclVarDsc* varDsc; for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { /* Ignore field locals of the promotion type PROMOTION_TYPE_FIELD_DEPENDENT. In other words, we will not calculate the "base" address of the struct local if the promotion type is PROMOTION_TYPE_FIELD_DEPENDENT. */ if (lvaIsFieldOfDependentlyPromotedStruct(varDsc)) { continue; } #if FEATURE_FIXED_OUT_ARGS // The scratch mem is used for the outgoing arguments, and it must be absolutely last if (lclNum == lvaOutgoingArgSpaceVar) { continue; } #endif bool allocateOnFrame = varDsc->lvOnFrame; if (varDsc->lvRegister && (lvaDoneFrameLayout == REGALLOC_FRAME_LAYOUT) && ((varDsc->TypeGet() != TYP_LONG) || (varDsc->GetOtherReg() != REG_STK))) { allocateOnFrame = false; } // For OSR args and locals, we use the slots on the original frame. // // Note we must do this even for "non frame" locals, as we sometimes // will refer to their memory homes. if (lvaIsOSRLocal(lclNum)) { if (varDsc->lvIsStructField) { const unsigned parentLclNum = varDsc->lvParentLcl; const int parentOriginalOffset = info.compPatchpointInfo->Offset(parentLclNum); const int offset = originalFrameStkOffs + parentOriginalOffset + varDsc->lvFldOffset; JITDUMP("---OSR--- V%02u (promoted field of V%02u; on tier0 frame) tier0 FP-rel offset %d tier0 " "frame offset %d field offset %d new virt offset " "%d\n", lclNum, parentLclNum, parentOriginalOffset, originalFrameStkOffs, varDsc->lvFldOffset, offset); lvaTable[lclNum].SetStackOffset(offset); } else { // Add the frame-pointer-relative offset of this OSR live local in the original frame // to the offset of original frame in our new frame. const int originalOffset = info.compPatchpointInfo->Offset(lclNum); const int offset = originalFrameStkOffs + originalOffset; JITDUMP( "---OSR--- V%02u (on tier0 frame) tier0 FP-rel offset %d tier0 frame offset %d new virt offset " "%d\n", lclNum, originalOffset, originalFrameStkOffs, offset); lvaTable[lclNum].SetStackOffset(offset); } continue; } /* Ignore variables that are not on the stack frame */ if (!allocateOnFrame) { /* For EnC, all variables have to be allocated space on the stack, even though they may actually be enregistered. This way, the frame layout can be directly inferred from the locals-sig. */ if (!opts.compDbgEnC) { continue; } else if (lclNum >= info.compLocalsCount) { // ignore temps for EnC continue; } } else if (lvaGSSecurityCookie == lclNum && getNeedsGSSecurityCookie()) { // Special case for OSR. If the original method had a cookie, // we use its slot on the original frame.
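// For illustration only (hypothetical numbers): with originalFrameStkOffs == -8 and a Tier0 cookie
// at Tier0 offset -16, the OSR path below re-homes the cookie at virtual offset -24, the same stack
// slot the Tier0 code already guards.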
if (opts.IsOSR() && info.compPatchpointInfo->HasSecurityCookie()) { int originalOffset = info.compPatchpointInfo->SecurityCookieOffset(); int offset = originalFrameStkOffs + originalOffset; JITDUMP("---OSR--- V%02u (on tier0 frame, security cookie) tier0 FP-rel offset %d tier0 frame " "offset %d new " "virt offset %d\n", lclNum, originalOffset, originalFrameStkOffs, offset); lvaTable[lclNum].SetStackOffset(offset); } continue; } // These need to be located as the very first variables (highest memory address) // and so they have already been assigned an offset if ( #if defined(FEATURE_EH_FUNCLETS) lclNum == lvaPSPSym || #else lclNum == lvaShadowSPslotsVar || #endif // FEATURE_EH_FUNCLETS #ifdef JIT32_GCENCODER lclNum == lvaLocAllocSPvar || #endif // JIT32_GCENCODER lclNum == lvaRetAddrVar) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } if (lclNum == lvaMonAcquired) { continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaStubArgumentVar) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif continue; } // This should be low on the stack. Hence, it will be assigned later. if (lclNum == lvaInlinedPInvokeFrameVar) { noway_assert(codeGen->isFramePointerUsed()); continue; } if (varDsc->lvIsParam) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // On Windows AMD64 we can use the caller-reserved stack area that is already setup assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; #else // !TARGET_AMD64 // A register argument that is not enregistered ends up as // a local variable which will need stack frame space. // if (!varDsc->lvIsRegArg) { continue; } #ifdef TARGET_ARM64 if (info.compIsVarArgs && varDsc->GetArgReg() != theFixedRetBuffArgNum()) { // Stack offset to varargs (parameters) should point to home area which will be preallocated. const unsigned regArgNum = genMapIntRegNumToRegArgNum(varDsc->GetArgReg()); varDsc->SetStackOffset(-initialStkOffs + regArgNum * REGSIZE_BYTES); continue; } #endif #ifdef TARGET_ARM // On ARM we spill the registers in codeGen->regSet.rsMaskPreSpillRegArg // in the prolog, thus they don't need stack frame space. // if ((codeGen->regSet.rsMaskPreSpillRegs(false) & genRegMask(varDsc->GetArgReg())) != 0) { assert(varDsc->GetStackOffset() != BAD_STK_OFFS); continue; } #endif #endif // !TARGET_AMD64 } /* Make sure the type is appropriate */ if (varDsc->lvIsUnsafeBuffer && compGSReorderStackLayout) { if (varDsc->lvIsPtr) { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS_WITH_PTRS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS_WITH_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_UNSAFE_BUFFERS) == 0) { assignMore |= ALLOC_UNSAFE_BUFFERS; continue; } } } else if (varTypeIsGC(varDsc->TypeGet()) && varDsc->lvTracked) { if ((alloc_order[cur] & ALLOC_PTRS) == 0) { assignMore |= ALLOC_PTRS; continue; } } else { if ((alloc_order[cur] & ALLOC_NON_PTRS) == 0) { assignMore |= ALLOC_NON_PTRS; continue; } } /* Need to align the offset? 
*/ if (mustDoubleAlign && (varDsc->lvType == TYP_DOUBLE // Align doubles for ARM and x86 #ifdef TARGET_ARM || varDsc->lvType == TYP_LONG // Align longs for ARM #endif #ifndef TARGET_64BIT || varDsc->lvStructDoubleAlign // Align when lvStructDoubleAlign is true #endif // !TARGET_64BIT )) { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) && !have_LclVarDoubleAlign) { // If this is the first TYP_LONG, TYP_DOUBLE or double aligned struct // that we have seen in this loop then we allocate a pointer sized // stack slot since we may need to double align this LclVar // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } else { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } // Remember that we had to double align a LclVar have_LclVarDoubleAlign = true; } // Reserve the stack space for this variable stkOffs = lvaAllocLocalAndSetVirtualOffset(lclNum, lvaLclSize(lclNum), stkOffs); #ifdef TARGET_ARMARCH // If we have an incoming register argument that has a struct promoted field // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // if (varDsc->lvIsRegArg && varDsc->lvPromotedStruct()) { unsigned firstFieldNum = varDsc->lvFieldLclStart; for (unsigned i = 0; i < varDsc->lvFieldCnt; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(firstFieldNum + i); fieldVarDsc->SetStackOffset(varDsc->GetStackOffset() + fieldVarDsc->lvFldOffset); } } #ifdef TARGET_ARM // If we have an incoming register argument that has a promoted long // then we need to copy the lvStkOff (the stack home) from the reg arg to the field lclvar // else if (varDsc->lvIsRegArg && varDsc->lvPromoted) { assert(varTypeIsLong(varDsc) && (varDsc->lvFieldCnt == 2)); unsigned fieldVarNum = varDsc->lvFieldLclStart; lvaTable[fieldVarNum].SetStackOffset(varDsc->GetStackOffset()); lvaTable[fieldVarNum + 1].SetStackOffset(varDsc->GetStackOffset() + 4); } #endif // TARGET_ARM #endif // TARGET_ARMARCH } } if (getNeedsGSSecurityCookie() && !compGSReorderStackLayout) { if (!opts.IsOSR() || !info.compPatchpointInfo->HasSecurityCookie()) { // LOCALLOC used, but we have no unsafe buffer. Allocate the cookie last, close to localloc buffer. stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaGSSecurityCookie, lvaLclSize(lvaGSSecurityCookie), stkOffs); } } if (tempsAllocated == false) { /*------------------------------------------------------------------------- * * Now the temps * *------------------------------------------------------------------------- */ stkOffs = lvaAllocateTemps(stkOffs, mustDoubleAlign); } /*------------------------------------------------------------------------- * * Now do some final stuff * *------------------------------------------------------------------------- */ // lvaInlinedPInvokeFrameVar and lvaStubArgumentVar need to be assigned last // Important: The stack walker depends on lvaStubArgumentVar immediately // following lvaInlinedPInvokeFrameVar in the frame.
if (lvaStubArgumentVar != BAD_VAR_NUM) { #ifdef JIT32_GCENCODER noway_assert(codeGen->isFramePointerUsed()); #endif stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaStubArgumentVar, lvaLclSize(lvaStubArgumentVar), stkOffs); } if (lvaInlinedPInvokeFrameVar != BAD_VAR_NUM) { noway_assert(codeGen->isFramePointerUsed()); stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaInlinedPInvokeFrameVar, lvaLclSize(lvaInlinedPInvokeFrameVar), stkOffs); } if (mustDoubleAlign) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // Allocate a pointer sized stack slot, since we may need to double align here // when lvaDoneFrameLayout == FINAL_FRAME_LAYOUT // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; if (have_LclVarDoubleAlign) { // If we have any TYP_LONG, TYP_DOUBLE or double aligned structs // then we need to allocate a second pointer sized stack slot, // since we may need to double align the last LclVar that we saw // in the loop above. We do this so that the offsets that we // calculate for the stack frame are always greater than they will // be in the final layout. // lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } } else // FINAL_FRAME_LAYOUT { if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_AMD64) if (lvaPSPSym != BAD_VAR_NUM) { // On AMD64, if we need a PSPSym, allocate it last, immediately above the outgoing argument // space. Any padding will be higher on the stack than this // (including the padding added by lvaAlignFrame()). noway_assert(codeGen->isFramePointerUsed()); // We need an explicit frame pointer stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaPSPSym, TARGET_POINTER_SIZE, stkOffs); } #endif // FEATURE_EH_FUNCLETS && defined(TARGET_AMD64) #ifdef TARGET_ARM64 if (!codeGen->IsSaveFpLrWithAllCalleeSavedRegisters() && isFramePointerUsed()) // Note that currently we always have a frame pointer { // Create space for saving FP and LR. stkOffs -= 2 * REGSIZE_BYTES; } #endif // TARGET_ARM64 #if FEATURE_FIXED_OUT_ARGS if (lvaOutgoingArgSpaceSize > 0) { #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // No 4 slots for outgoing params on System V. noway_assert(lvaOutgoingArgSpaceSize >= (4 * TARGET_POINTER_SIZE)); #endif noway_assert((lvaOutgoingArgSpaceSize % TARGET_POINTER_SIZE) == 0); // Give it a value so we can avoid asserts in CHK builds. // Since this will always use an SP relative offset of zero // at the end of lvaFixVirtualFrameOffsets, it will be set to absolute '0' stkOffs = lvaAllocLocalAndSetVirtualOffset(lvaOutgoingArgSpaceVar, lvaLclSize(lvaOutgoingArgSpaceVar), stkOffs); } #endif // FEATURE_FIXED_OUT_ARGS // compLclFrameSize equals our negated virtual stack offset minus the pushed registers and return address // and the pushed frame pointer register which for some strange reason isn't part of 'compCalleeRegsPushed'.
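// For illustration only (hypothetical x86 numbers): if stkOffs is -40 here and compCalleeRegsPushed
// is 3, then pushedCount below becomes 5 once the pushed EBP and the return address are counted, and
// the final assert checks compLclFrameSize == -(-40 + 5 * 4) == 20.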
int pushedCount = compCalleeRegsPushed; #ifdef TARGET_ARM64 if (info.compIsVarArgs) { pushedCount += MAX_REG_ARG; } #endif #ifdef TARGET_XARCH if (codeGen->doubleAlignOrFramePointerUsed()) { pushedCount += 1; // pushed EBP (frame pointer) } pushedCount += 1; // pushed PC (return address) #endif noway_assert(compLclFrameSize + originalFrameSize == (unsigned)-(stkOffs + (pushedCount * (int)TARGET_POINTER_SIZE))); } int Compiler::lvaAllocLocalAndSetVirtualOffset(unsigned lclNum, unsigned size, int stkOffs) { noway_assert(lclNum != BAD_VAR_NUM); #ifdef TARGET_64BIT // Before final frame layout, assume the worst case, that every >=8 byte local will need // maximum padding to be aligned. This is because we generate code based on the stack offset // computed during tentative frame layout. These offsets cannot get bigger during final // frame layout, as that would possibly require different code generation (for example, // using a 4-byte offset instead of a 1-byte offset in an instruction). The offsets can get // smaller. It is possible there is different alignment at the point locals are allocated // between tentative and final frame layout which would introduce padding between locals // and thus increase the offset (from the stack pointer) of one of the locals. Hence the // need to assume the worst alignment before final frame layout. // We could probably improve this by sorting all the objects by alignment, // such that all 8 byte objects are together, 4 byte objects are together, etc., which // would require at most one alignment padding per group. // // TYP_SIMD structs locals have alignment preference given by getSIMDTypeAlignment() for // better performance. if ((size >= 8) && ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || ((stkOffs % 8) != 0) #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES || lclVarIsSIMDType(lclNum) #endif )) { // Note that stack offsets are negative or equal to zero assert(stkOffs <= 0); // alignment padding unsigned pad = 0; #if defined(FEATURE_SIMD) && ALIGN_SIMD_TYPES if (lclVarIsSIMDType(lclNum) && !lvaIsImplicitByRefLocal(lclNum)) { int alignment = getSIMDTypeAlignment(lvaTable[lclNum].lvType); if (stkOffs % alignment != 0) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = alignment - 1; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = alignment + (stkOffs % alignment); // +1 to +(alignment-1) bytes } } } else #endif // FEATURE_SIMD && ALIGN_SIMD_TYPES { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { pad = 7; // Note that all the objects will probably be misaligned, but we'll fix that in final layout. } else { pad = 8 + (stkOffs % 8); // +1 to +7 bytes } } // Will the pad ever be anything except 4? Do we put smaller-than-4-sized objects on the stack? lvaIncrementFrameSize(pad); stkOffs -= pad; #ifdef DEBUG if (verbose) { printf("Pad "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x, pad=%d\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? -stkOffs : stkOffs, pad); } #endif } #endif // TARGET_64BIT /* Reserve space on the stack by bumping the frame size */ lvaIncrementFrameSize(size); stkOffs -= size; lvaTable[lclNum].SetStackOffset(stkOffs); #ifdef DEBUG if (verbose) { printf("Assign "); gtDispLclVar(lclNum, /*pad*/ false); printf(", size=%d, stkOffs=%c0x%x\n", size, stkOffs < 0 ? '-' : '+', stkOffs < 0 ? 
-stkOffs : stkOffs); } #endif return stkOffs; } #ifdef TARGET_AMD64 /***************************************************************************** * lvaIsCalleeSavedIntRegCountEven() : returns true if the number of integer registers * pushed onto stack is even including RBP if used as frame pointer * * Note that this excludes return address (PC) pushed by caller. To know whether * the SP offset after pushing integer registers is aligned, we need to take * negation of this routine. */ bool Compiler::lvaIsCalleeSavedIntRegCountEven() { unsigned regsPushed = compCalleeRegsPushed + (codeGen->isFramePointerUsed() ? 1 : 0); return (regsPushed % (16 / REGSIZE_BYTES)) == 0; } #endif // TARGET_AMD64 /***************************************************************************** * lvaAlignFrame() : After allocating everything on the frame, reserve any * extra space needed to keep the frame aligned */ void Compiler::lvaAlignFrame() { #if defined(TARGET_AMD64) // Leaf frames do not need full alignment, but the unwind info is smaller if we // are at least 8 byte aligned (and we assert as much) if ((compLclFrameSize % 8) != 0) { lvaIncrementFrameSize(8 - (compLclFrameSize % 8)); } else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add 8 so compLclFrameSize is still a multiple of 8. lvaIncrementFrameSize(8); } assert((compLclFrameSize % 8) == 0); // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD // if needed, but off by 8 because of the return value. // And don't forget that compCalleeRegsPushed does *not* include RBP if we are // using it as the frame pointer. // bool regPushedCountAligned = lvaIsCalleeSavedIntRegCountEven(); bool lclFrameSizeAligned = (compLclFrameSize % 16) == 0; // If this isn't the final frame layout, assume we have to push an extra QWORD // Just so the offsets are true upper limits. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef UNIX_AMD64_ABI // The compNeedToAlignFrame flag indicates whether there is a need to align the frame. // On AMD64-Windows, if there are calls, 4 slots for the outgoing args are allocated, except for // FastTailCall. These slots make the frame size non-zero, so alignment logic will be called. // On AMD64-Unix, there are no such slots. There is a possibility to have calls in the method with frame size of 0. // The frame alignment logic won't kick in. This flag takes care of the AMD64-Unix case by remembering that there // are calls and making sure the frame alignment logic is executed. bool stackNeedsAlignment = (compLclFrameSize != 0 || opts.compNeedToAlignFrame); #else // !UNIX_AMD64_ABI bool stackNeedsAlignment = compLclFrameSize != 0; #endif // !UNIX_AMD64_ABI if ((!codeGen->isFramePointerUsed() && (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT)) || (stackNeedsAlignment && (regPushedCountAligned == lclFrameSizeAligned))) { lvaIncrementFrameSize(REGSIZE_BYTES); } #elif defined(TARGET_ARM64) // The stack on ARM64 must be 16 byte aligned. // First, align up to 8. if ((compLclFrameSize % 8) != 0) { lvaIncrementFrameSize(8 - (compLclFrameSize % 8)); } else if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add 8 so compLclFrameSize is still a multiple of 8.
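// For illustration only (hypothetical ARM64 numbers): suppose compLclFrameSize is already a multiple
// of 8, say 24, during estimation; the 8 added below keeps the estimate a true upper bound. At final
// layout, with compCalleeRegsPushed == 3 (odd, contributing 8 modulo 16) and compLclFrameSize == 24
// (also 8 modulo 16), the two misalignments cancel in the QWORD check that follows, no extra slot is
// grabbed, and SP still ends up 16-byte aligned (3 * 8 + 24 == 48).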
lvaIncrementFrameSize(8); } assert((compLclFrameSize % 8) == 0); // Ensure that the stack is always 16-byte aligned by grabbing an unused QWORD // if needed. bool regPushedCountAligned = (compCalleeRegsPushed % (16 / REGSIZE_BYTES)) == 0; bool lclFrameSizeAligned = (compLclFrameSize % 16) == 0; // If this isn't the final frame layout, assume we have to push an extra QWORD // Just so the offsets are true upper limits. if ((lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) || (regPushedCountAligned != lclFrameSizeAligned)) { lvaIncrementFrameSize(REGSIZE_BYTES); } #elif defined(TARGET_ARM) // Ensure that stack offsets will be double-aligned by grabbing an unused DWORD if needed. // bool lclFrameSizeAligned = (compLclFrameSize % sizeof(double)) == 0; bool regPushedCountAligned = ((compCalleeRegsPushed + genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true))) % (sizeof(double) / TARGET_POINTER_SIZE)) == 0; if (regPushedCountAligned != lclFrameSizeAligned) { lvaIncrementFrameSize(TARGET_POINTER_SIZE); } #elif defined(TARGET_X86) #if DOUBLE_ALIGN if (genDoubleAlign()) { // Double Frame Alignment for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals() if (compLclFrameSize == 0) { // This can only happen with JitStress=1 or JitDoubleAlign=2 lvaIncrementFrameSize(TARGET_POINTER_SIZE); } } #endif if (STACK_ALIGN > REGSIZE_BYTES) { if (lvaDoneFrameLayout != FINAL_FRAME_LAYOUT) { // If we are not doing final layout, we don't know the exact value of compLclFrameSize // and thus do not know how much we will need to add in order to be aligned. // We add the maximum pad that we could ever have (which is 12) lvaIncrementFrameSize(STACK_ALIGN - REGSIZE_BYTES); } // Align the stack with STACK_ALIGN value. int adjustFrameSize = compLclFrameSize; #if defined(UNIX_X86_ABI) bool isEbpPushed = codeGen->isFramePointerUsed(); #if DOUBLE_ALIGN isEbpPushed |= genDoubleAlign(); #endif // we need to consider spilled register(s) plus return address and/or EBP int adjustCount = compCalleeRegsPushed + 1 + (isEbpPushed ? 1 : 0); adjustFrameSize += (adjustCount * REGSIZE_BYTES) % STACK_ALIGN; #endif if ((adjustFrameSize % STACK_ALIGN) != 0) { lvaIncrementFrameSize(STACK_ALIGN - (adjustFrameSize % STACK_ALIGN)); } } #else NYI("TARGET specific lvaAlignFrame"); #endif // !TARGET_AMD64 } /***************************************************************************** * lvaAssignFrameOffsetsToPromotedStructs() : Assign offsets to fields * within a promoted struct (worker for lvaAssignFrameOffsets). */ void Compiler::lvaAssignFrameOffsetsToPromotedStructs() { LclVarDsc* varDsc = lvaTable; for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++, varDsc++) { // For promoted struct fields that are params, we will // assign their offsets in lvaAssignVirtualFrameOffsetToArg(). // This is not true for the System V systems since there is no // outgoing args space. Assign the dependently promoted fields properly. // CLANG_FORMAT_COMMENT_ANCHOR; #if defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) // ARM: lo/hi parts of a promoted long arg need to be updated. // // For System V platforms there is no outgoing args space. // // For System V and x86, a register passed struct arg is homed on the stack in a separate local var. // The offset of these structs is already calculated in the lvaAssignVirtualFrameOffsetToArg method. // Make sure the code below is not executed for these structs and the offset is not changed. // const bool mustProcessParams = true; #else // OSR must also assign offsets here.
// const bool mustProcessParams = opts.IsOSR(); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_ARM) || defined(TARGET_X86) if (varDsc->lvIsStructField && (!varDsc->lvIsParam || mustProcessParams)) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); if (promotionType == PROMOTION_TYPE_INDEPENDENT) { // The stack offset for these field locals must have been calculated // by the normal frame offset assignment. continue; } else { noway_assert(promotionType == PROMOTION_TYPE_DEPENDENT); noway_assert(varDsc->lvOnFrame); if (parentvarDsc->lvOnFrame) { JITDUMP("Adjusting offset of dependent V%02u of V%02u: parent %u field %u net %u\n", lclNum, varDsc->lvParentLcl, parentvarDsc->GetStackOffset(), varDsc->lvFldOffset, parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); varDsc->SetStackOffset(parentvarDsc->GetStackOffset() + varDsc->lvFldOffset); } else { varDsc->lvOnFrame = false; noway_assert(varDsc->lvRefCnt() == 0); } } } } } /***************************************************************************** * lvaAllocateTemps() : Assign virtual offsets to temps (always negative). */ int Compiler::lvaAllocateTemps(int stkOffs, bool mustDoubleAlign) { unsigned spillTempSize = 0; if (lvaDoneFrameLayout == FINAL_FRAME_LAYOUT) { int preSpillSize = 0; #ifdef TARGET_ARM preSpillSize = genCountBits(codeGen->regSet.rsMaskPreSpillRegs(true)) * TARGET_POINTER_SIZE; #endif /* Allocate temps */ assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { var_types tempType = temp->tdTempType(); unsigned size = temp->tdTempSize(); /* Figure out and record the stack offset of the temp */ /* Need to align the offset? */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (varTypeIsGC(tempType) && ((stkOffs % TARGET_POINTER_SIZE) != 0)) { // Calculate 'pad' as the number of bytes to align up 'stkOffs' to be a multiple of TARGET_POINTER_SIZE // In practice this is really just a fancy way of writing 4. (as all stack locations are at least 4-byte // aligned). Note stkOffs is always negative, so (stkOffs % TARGET_POINTER_SIZE) yields a negative // value. // int alignPad = (int)AlignmentPad((unsigned)-stkOffs, TARGET_POINTER_SIZE); spillTempSize += alignPad; lvaIncrementFrameSize(alignPad); stkOffs -= alignPad; noway_assert((stkOffs % TARGET_POINTER_SIZE) == 0); } #endif if (mustDoubleAlign && (tempType == TYP_DOUBLE)) // Align doubles for x86 and ARM { noway_assert((compLclFrameSize % TARGET_POINTER_SIZE) == 0); if (((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) != 0) { spillTempSize += TARGET_POINTER_SIZE; lvaIncrementFrameSize(TARGET_POINTER_SIZE); stkOffs -= TARGET_POINTER_SIZE; } // We should now have a double-aligned (stkOffs+preSpillSize) noway_assert(((stkOffs + preSpillSize) % (2 * TARGET_POINTER_SIZE)) == 0); } spillTempSize += size; lvaIncrementFrameSize(size); stkOffs -= size; temp->tdSetTempOffs(stkOffs); } #ifdef TARGET_ARM // An accurate estimate for spillTempSize is only required on the ARM platform noway_assert(spillTempSize <= lvaGetMaxSpillTempSize()); #endif } else // We haven't run codegen, so there are no Spill temps yet! { unsigned size = lvaGetMaxSpillTempSize(); lvaIncrementFrameSize(size); stkOffs -= size; } return stkOffs; } #ifdef DEBUG /***************************************************************************** * * Dump the register a local is in right now.
It is only the current location, since the location changes and it * is updated throughout code generation based on LSRA register assignments. */ void Compiler::lvaDumpRegLocation(unsigned lclNum) { const LclVarDsc* varDsc = lvaGetDesc(lclNum); #ifdef TARGET_ARM if (varDsc->TypeGet() == TYP_DOUBLE) { // The assigned registers are `lvRegNum:RegNext(lvRegNum)` printf("%3s:%-3s ", getRegName(varDsc->GetRegNum()), getRegName(REG_NEXT(varDsc->GetRegNum()))); } else #endif // TARGET_ARM { printf("%3s ", getRegName(varDsc->GetRegNum())); } } /***************************************************************************** * * Dump the frame location assigned to a local. * It's the home location, even though the variable doesn't always live * in its home location. */ void Compiler::lvaDumpFrameLocation(unsigned lclNum) { int offset; regNumber baseReg; #ifdef TARGET_ARM offset = lvaFrameAddress(lclNum, compLocallocUsed, &baseReg, 0, /* isFloatUsage */ false); #else bool EBPbased; offset = lvaFrameAddress(lclNum, &EBPbased); baseReg = EBPbased ? REG_FPBASE : REG_SPBASE; #endif printf("[%2s%1s%02XH] ", getRegName(baseReg), (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } /***************************************************************************** * * dump a single lvaTable entry */ void Compiler::lvaDumpEntry(unsigned lclNum, FrameLayoutState curState, size_t refCntWtdWidth) { LclVarDsc* varDsc = lvaGetDesc(lclNum); var_types type = varDsc->TypeGet(); if (curState == INITIAL_FRAME_LAYOUT) { printf("; "); gtDispLclVar(lclNum); printf(" %7s ", varTypeName(type)); gtDispLclVarStructType(lclNum); } else { if (varDsc->lvRefCnt() == 0) { // Print this with a special indicator that the variable is unused. Even though the // variable itself is unused, it might be a struct that is promoted, so seeing it // can be useful when looking at the promoted struct fields. It's also weird to see // missing var numbers if these aren't printed. printf(";* "); } #if FEATURE_FIXED_OUT_ARGS // Since lvaOutgoingArgSpaceSize is a PhasedVar we can't read it for Dumping until // after we set it to something. else if ((lclNum == lvaOutgoingArgSpaceVar) && lvaOutgoingArgSpaceSize.HasFinalValue() && (lvaOutgoingArgSpaceSize == 0)) { // Similar to above; print this anyway. printf(";# "); } #endif // FEATURE_FIXED_OUT_ARGS else { printf("; "); } gtDispLclVar(lclNum); printf("[V%02u", lclNum); if (varDsc->lvTracked) { printf(",T%02u]", varDsc->lvVarIndex); } else { printf(" ]"); } printf(" (%3u,%*s)", varDsc->lvRefCnt(), (int)refCntWtdWidth, refCntWtd2str(varDsc->lvRefCntWtd())); printf(" %7s ", varTypeName(type)); if (genTypeSize(type) == 0) { printf("(%2d) ", lvaLclSize(lclNum)); } else { printf(" -> "); } // The register or stack location field is 11 characters wide. if ((varDsc->lvRefCnt() == 0) && !varDsc->lvImplicitlyReferenced) { printf("zero-ref "); } else if (varDsc->lvRegister != 0) { // It's always a register, and always in the same register. lvaDumpRegLocation(lclNum); } else if (varDsc->lvOnFrame == 0) { printf("registers "); } else { // For RyuJIT backend, it might be in a register part of the time, but it will definitely have a stack home // location. Otherwise, it's always on the stack. 
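// For illustration only: depending on whether the local is FP- or SP-based, lvaDumpFrameLocation
// below prints a home such as [ebp-08H] or [sp+28H] (hypothetical offsets).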
if (lvaDoneFrameLayout != NO_FRAME_LAYOUT) { lvaDumpFrameLocation(lclNum); } } } if (varDsc->lvIsHfa()) { printf(" HFA(%s) ", varTypeName(varDsc->GetHfaType())); } if (varDsc->lvDoNotEnregister) { printf(" do-not-enreg["); if (varDsc->IsAddressExposed()) { printf("X"); } if (varTypeIsStruct(varDsc)) { printf("S"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::VMNeedsStackAddr) { printf("V"); } if (lvaEnregEHVars && varDsc->lvLiveInOutOfHndlr) { printf("%c", varDsc->lvSingleDefDisqualifyReason); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::LocalField) { printf("F"); } if (varDsc->GetDoNotEnregReason() == DoNotEnregisterReason::BlockOp) { printf("B"); } if (varDsc->lvIsMultiRegArg) { printf("A"); } if (varDsc->lvIsMultiRegRet) { printf("R"); } #ifdef JIT32_GCENCODER if (varDsc->lvPinned) printf("P"); #endif // JIT32_GCENCODER printf("]"); } if (varDsc->lvIsMultiRegArg) { printf(" multireg-arg"); } if (varDsc->lvIsMultiRegRet) { printf(" multireg-ret"); } if (varDsc->lvMustInit) { printf(" must-init"); } if (varDsc->IsAddressExposed()) { printf(" addr-exposed"); } if (varDsc->lvHasLdAddrOp) { printf(" ld-addr-op"); } if (varDsc->lvVerTypeInfo.IsThisPtr()) { printf(" this"); } if (varDsc->lvPinned) { printf(" pinned"); } if (varDsc->lvStackByref) { printf(" stack-byref"); } if (varDsc->lvClassHnd != NO_CLASS_HANDLE) { printf(" class-hnd"); } if (varDsc->lvClassIsExact) { printf(" exact"); } if (varDsc->lvLiveInOutOfHndlr) { printf(" EH-live"); } if (varDsc->lvSpillAtSingleDef) { printf(" spill-single-def"); } else if (varDsc->lvSingleDefRegCandidate) { printf(" single-def"); } if (lvaIsOSRLocal(lclNum) && varDsc->lvOnFrame) { printf(" tier0-frame"); } #ifndef TARGET_64BIT if (varDsc->lvStructDoubleAlign) printf(" double-align"); #endif // !TARGET_64BIT if (varDsc->lvOverlappingFields) { printf(" overlapping-fields"); } if (compGSReorderStackLayout && !varDsc->lvRegister) { if (varDsc->lvIsPtr) { printf(" ptr"); } if (varDsc->lvIsUnsafeBuffer) { printf(" unsafe-buffer"); } } if (varDsc->lvIsStructField) { LclVarDsc* parentvarDsc = lvaGetDesc(varDsc->lvParentLcl); #if !defined(TARGET_64BIT) if (varTypeIsLong(parentvarDsc)) { bool isLo = (lclNum == parentvarDsc->lvFieldLclStart); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, isLo ? "lo" : "hi", isLo ? 0 : genTypeSize(TYP_INT)); } else #endif // !defined(TARGET_64BIT) { CORINFO_CLASS_HANDLE typeHnd = parentvarDsc->GetStructHnd(); CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(typeHnd, varDsc->lvFldOrdinal); printf(" V%02u.%s(offs=0x%02x)", varDsc->lvParentLcl, eeGetFieldName(fldHnd), varDsc->lvFldOffset); lvaPromotionType promotionType = lvaGetPromotionType(parentvarDsc); switch (promotionType) { case PROMOTION_TYPE_NONE: printf(" P-NONE"); break; case PROMOTION_TYPE_DEPENDENT: printf(" P-DEP"); break; case PROMOTION_TYPE_INDEPENDENT: printf(" P-INDEP"); break; } } } if (varDsc->lvReason != nullptr) { printf(" \"%s\"", varDsc->lvReason); } printf("\n"); } /***************************************************************************** * * dump the lvaTable */ void Compiler::lvaTableDump(FrameLayoutState curState) { if (curState == NO_FRAME_LAYOUT) { curState = lvaDoneFrameLayout; if (curState == NO_FRAME_LAYOUT) { // Still no layout? 
Could be a bug, but just display the initial layout curState = INITIAL_FRAME_LAYOUT; } } if (curState == INITIAL_FRAME_LAYOUT) { printf("; Initial"); } else if (curState == PRE_REGALLOC_FRAME_LAYOUT) { printf("; Pre-RegAlloc"); } else if (curState == REGALLOC_FRAME_LAYOUT) { printf("; RegAlloc"); } else if (curState == TENTATIVE_FRAME_LAYOUT) { printf("; Tentative"); } else if (curState == FINAL_FRAME_LAYOUT) { printf("; Final"); } else { printf("UNKNOWN FrameLayoutState!"); unreached(); } printf(" local variable assignments\n"); printf(";\n"); unsigned lclNum; LclVarDsc* varDsc; // Figure out some sizes, to help line things up size_t refCntWtdWidth = 6; // Use 6 as the minimum width if (curState != INITIAL_FRAME_LAYOUT) // don't need this info for INITIAL_FRAME_LAYOUT { for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { size_t width = strlen(refCntWtd2str(varDsc->lvRefCntWtd())); if (width > refCntWtdWidth) { refCntWtdWidth = width; } } } // Do the actual output for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { lvaDumpEntry(lclNum, curState, refCntWtdWidth); } //------------------------------------------------------------------------- // Display the code-gen temps assert(codeGen->regSet.tmpAllFree()); for (TempDsc* temp = codeGen->regSet.tmpListBeg(); temp != nullptr; temp = codeGen->regSet.tmpListNxt(temp)) { printf("; TEMP_%02u %26s%*s%7s -> ", -temp->tdTempNum(), " ", refCntWtdWidth, " ", varTypeName(temp->tdTempType())); int offset = temp->tdTempOffs(); printf(" [%2s%1s0x%02X]\n", isFramePointerUsed() ? STR_FPBASE : STR_SPBASE, (offset < 0 ? "-" : "+"), (offset < 0 ? -offset : offset)); } if (curState >= TENTATIVE_FRAME_LAYOUT) { printf(";\n"); printf("; Lcl frame size = %d\n", compLclFrameSize); } } #endif // DEBUG /***************************************************************************** * * Conservatively estimate the layout of the stack frame. * * This function is only used before final frame layout. It conservatively estimates the * number of callee-saved registers that must be saved, then calls lvaAssignFrameOffsets(). * To do final frame layout, the callee-saved registers are known precisely, so * lvaAssignFrameOffsets() is called directly. * * Returns the (conservative, that is, overly large) estimated size of the frame, * including the callee-saved registers. This is only used by the emitter during code * generation when estimating the size of the offset of instructions accessing temps, * and only if temps have a larger offset than variables. */ unsigned Compiler::lvaFrameSize(FrameLayoutState curState) { assert(curState < FINAL_FRAME_LAYOUT); unsigned result; /* Layout the stack frame conservatively. Assume all callee-saved registers are spilled to stack */ compCalleeRegsPushed = CNT_CALLEE_SAVED; #if defined(TARGET_ARMARCH) if (compFloatingPointUsed) compCalleeRegsPushed += CNT_CALLEE_SAVED_FLOAT; compCalleeRegsPushed++; // we always push LR. See genPushCalleeSavedRegisters #elif defined(TARGET_AMD64) if (compFloatingPointUsed) { compCalleeFPRegsSavedMask = RBM_FLT_CALLEE_SAVED; } else { compCalleeFPRegsSavedMask = RBM_NONE; } #endif #if DOUBLE_ALIGN if (genDoubleAlign()) { // X86 only - account for extra 4-byte pad that may be created by "and esp, -8" instruction compCalleeRegsPushed++; } #endif #ifdef TARGET_XARCH // Since FP/EBP is included in the SAVED_REG_MAXSZ we need to // subtract 1 register if codeGen->isFramePointerUsed() is true. 
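// (Hypothetical worked example: if CNT_CALLEE_SAVED were 8 and a frame pointer is in use, the decrement below leaves compCalleeRegsPushed at 7, since EBP/RBP is already counted in SAVED_REG_MAXSZ.)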
if (codeGen->isFramePointerUsed()) { compCalleeRegsPushed--; } #endif lvaAssignFrameOffsets(curState); unsigned calleeSavedRegMaxSz = CALLEE_SAVED_REG_MAXSZ; #if defined(TARGET_ARMARCH) if (compFloatingPointUsed) { calleeSavedRegMaxSz += CALLEE_SAVED_FLOAT_MAXSZ; } calleeSavedRegMaxSz += REGSIZE_BYTES; // we always push LR. See genPushCalleeSavedRegisters #endif result = compLclFrameSize + calleeSavedRegMaxSz; return result; } //------------------------------------------------------------------------ // lvaGetSPRelativeOffset: Given a variable, return the offset of that // variable in the frame from the stack pointer. This number will be positive, // since the stack pointer must be at a lower address than everything on the // stack. // // This can't be called for localloc functions, since the stack pointer // varies, and thus there is no fixed offset to a variable from the stack pointer. // // Arguments: // varNum - the variable number // // Return Value: // The offset. int Compiler::lvaGetSPRelativeOffset(unsigned varNum) { assert(!compLocallocUsed); assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); int spRelativeOffset; if (varDsc->lvFramePointerBased) { // The stack offset is relative to the frame pointer, so convert it to be // relative to the stack pointer (which makes no sense for localloc functions). spRelativeOffset = varDsc->GetStackOffset() + codeGen->genSPtoFPdelta(); } else { spRelativeOffset = varDsc->GetStackOffset(); } assert(spRelativeOffset >= 0); return spRelativeOffset; } /***************************************************************************** * * Return the caller-SP-relative stack offset of a local/parameter. * Requires the local to be on the stack and frame layout to be complete. */ int Compiler::lvaGetCallerSPRelativeOffset(unsigned varNum) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); return lvaToCallerSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased); } //----------------------------------------------------------------------------- // lvaToCallerSPRelativeOffset: translate a frame offset into an offset from // the caller's stack pointer. // // Arguments: // offset - frame offset // isFpBased - if true, offset is from FP, otherwise offset is from SP // forRootFrame - if the current method is an OSR method, adjust the offset // to be relative to the SP for the root method, instead of being relative // to the SP for the OSR method. // // Returns: // suitable offset // int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRootFrame) const { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); if (isFpBased) { offset += codeGen->genCallerSPtoFPdelta(); } else { offset += codeGen->genCallerSPtoInitialSPdelta(); } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) if (forRootFrame && opts.IsOSR()) { const PatchpointInfo* const ppInfo = info.compPatchpointInfo; #if defined(TARGET_AMD64) // The offset computed above already includes the OSR frame adjustment, plus the // pop of the "pseudo return address" from the OSR frame. // // To get to root method caller-SP, we need to subtract off the tier0 frame // size and the pushed return address and RBP for the tier0 frame (which we know is an // RBP frame). // // ppInfo's TotalFrameSize also accounts for the popped pseudo return address // between the tier0 method frame and the OSR frame.
So the net adjustment // is simply TotalFrameSize plus one register. // const int adjustment = ppInfo->TotalFrameSize() + REGSIZE_BYTES; #elif defined(TARGET_ARM64) const int adjustment = ppInfo->TotalFrameSize(); #endif offset -= adjustment; } #else // OSR NYI for other targets. assert(!opts.IsOSR()); #endif return offset; } /***************************************************************************** * * Return the Initial-SP-relative stack offset of a local/parameter. * Requires the local to be on the stack and frame layout to be complete. */ int Compiler::lvaGetInitialSPRelativeOffset(unsigned varNum) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); const LclVarDsc* varDsc = lvaGetDesc(varNum); assert(varDsc->lvOnFrame); return lvaToInitialSPRelativeOffset(varDsc->GetStackOffset(), varDsc->lvFramePointerBased); } // Given a local variable offset, and whether that offset is frame-pointer based, return its offset from Initial-SP. // This is used, for example, to figure out the offset of the frame pointer from Initial-SP. int Compiler::lvaToInitialSPRelativeOffset(unsigned offset, bool isFpBased) { assert(lvaDoneFrameLayout == FINAL_FRAME_LAYOUT); #ifdef TARGET_AMD64 if (isFpBased) { // Currently, the frame starts by pushing ebp, ebp points to the saved ebp // (so we have ebp pointer chaining). Add the fixed-size frame size plus the // size of the callee-saved regs (not including ebp itself) to find Initial-SP. assert(codeGen->isFramePointerUsed()); offset += codeGen->genSPtoFPdelta(); } else { // The offset is correct already! } #else // !TARGET_AMD64 NYI("lvaToInitialSPRelativeOffset"); #endif // !TARGET_AMD64 return offset; } /*****************************************************************************/ #ifdef DEBUG /***************************************************************************** * Pick a padding size at "random" for the local. * 0 means that it should not be converted to a GT_LCL_FLD */ static unsigned LCL_FLD_PADDING(unsigned lclNum) { // Convert every 2nd variable if (lclNum % 2) { return 0; } // Pick a padding size at "random" unsigned size = lclNum % 7; return size; } /***************************************************************************** * * Callback for fgWalkAllTreesPre() * Convert as many GT_LCL_VAR's to GT_LCL_FLD's */ /* static */ /* The stress mode does 2 passes. In the first pass we will mark the locals where we CAN'T apply the stress mode. In the second pass we will do the appropriate morphing wherever we've not determined we can't do it.
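(Illustrative outcome: the second pass may rewrite a use of V03 from GT_LCL_VAR(V03) to GT_LCL_FLD(V03, +4), widening V03 to TYP_BLK so the padded access stays within its storage.)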
*/ Compiler::fgWalkResult Compiler::lvaStressLclFldCB(GenTree** pTree, fgWalkData* data) { GenTree* tree = *pTree; genTreeOps oper = tree->OperGet(); GenTree* lcl; switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: lcl = tree; break; case GT_ADDR: if (tree->AsOp()->gtOp1->gtOper != GT_LCL_VAR) { return WALK_CONTINUE; } lcl = tree->AsOp()->gtOp1; break; default: return WALK_CONTINUE; } noway_assert(lcl->OperIs(GT_LCL_VAR, GT_LCL_VAR_ADDR)); Compiler* const pComp = ((lvaStressLclFldArgs*)data->pCallbackData)->m_pCompiler; const bool bFirstPass = ((lvaStressLclFldArgs*)data->pCallbackData)->m_bFirstPass; const unsigned lclNum = lcl->AsLclVarCommon()->GetLclNum(); var_types type = lcl->TypeGet(); LclVarDsc* const varDsc = pComp->lvaGetDesc(lclNum); if (varDsc->lvNoLclFldStress) { // Already determined we can't do anything for this var return WALK_SKIP_SUBTREES; } if (bFirstPass) { // Ignore arguments and temps if (varDsc->lvIsParam || lclNum >= pComp->info.compLocalsCount) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Ignore OSR locals; if in memory, they will live on the // Tier0 frame and so can't have their storage adjusted. // if (pComp->lvaIsOSRLocal(lclNum)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Likewise for Tier0 methods with patchpoints -- // if we modify them we'll misreport their locations in the patchpoint info. // if (pComp->doesMethodHavePatchpoints() || pComp->doesMethodHavePartialCompilationPatchpoints()) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Fix for lcl_fld stress mode if (varDsc->lvKeepType) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Can't have GC ptrs in TYP_BLK. if (!varTypeIsArithmetic(type)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // The noway_assert in the second pass below, requires that these types match, or we have a TYP_BLK // if ((varDsc->lvType != lcl->gtType) && (varDsc->lvType != TYP_BLK)) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Weed out "small" types like TYP_BYTE as we don't mark the GT_LCL_VAR // node with the accurate small type. If we bash lvaTable[].lvType, // then there will be no indication that it was ever a small type. 
var_types varType = varDsc->TypeGet(); if (varType != TYP_BLK && genTypeSize(varType) != genTypeSize(genActualType(varType))) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } // Offset some of the local variable by a "random" non-zero amount unsigned padding = LCL_FLD_PADDING(lclNum); if (padding == 0) { varDsc->lvNoLclFldStress = true; return WALK_SKIP_SUBTREES; } } else { // Do the morphing noway_assert((varDsc->lvType == lcl->gtType) || (varDsc->lvType == TYP_BLK)); var_types varType = varDsc->TypeGet(); // Calculate padding unsigned padding = LCL_FLD_PADDING(lclNum); #ifdef TARGET_ARMARCH // We need to support alignment requirements to access memory on ARM ARCH unsigned alignment = 1; pComp->codeGen->InferOpSizeAlign(lcl, &alignment); alignment = roundUp(alignment, TARGET_POINTER_SIZE); padding = roundUp(padding, alignment); #endif // TARGET_ARMARCH // Change the variable to a TYP_BLK if (varType != TYP_BLK) { varDsc->lvExactSize = roundUp(padding + pComp->lvaLclSize(lclNum), TARGET_POINTER_SIZE); varDsc->lvType = TYP_BLK; pComp->lvaSetVarAddrExposed(lclNum DEBUGARG(AddressExposedReason::STRESS_LCL_FLD)); } tree->gtFlags |= GTF_GLOB_REF; /* Now morph the tree appropriately */ if (oper == GT_LCL_VAR) { /* Change lclVar(lclNum) to lclFld(lclNum,padding) */ tree->ChangeOper(GT_LCL_FLD); tree->AsLclFld()->SetLclOffs(padding); } else if (oper == GT_LCL_VAR_ADDR) { tree->ChangeOper(GT_LCL_FLD_ADDR); tree->AsLclFld()->SetLclOffs(padding); } else { /* Change addr(lclVar) to addr(lclVar)+padding */ noway_assert(oper == GT_ADDR); GenTree* paddingTree = pComp->gtNewIconNode(padding); GenTree* newAddr = pComp->gtNewOperNode(GT_ADD, tree->gtType, tree, paddingTree); *pTree = newAddr; lcl->gtType = TYP_BLK; } } return WALK_SKIP_SUBTREES; } /*****************************************************************************/ void Compiler::lvaStressLclFld() { if (!compStressCompile(STRESS_LCL_FLDS, 5)) { return; } lvaStressLclFldArgs Args; Args.m_pCompiler = this; Args.m_bFirstPass = true; // Do First pass fgWalkAllTreesPre(lvaStressLclFldCB, &Args); // Second pass Args.m_bFirstPass = false; fgWalkAllTreesPre(lvaStressLclFldCB, &Args); } #endif // DEBUG /***************************************************************************** * * A little routine that displays a local variable bitset. * 'set' is mask of variables that have to be displayed * 'allVars' is the complete set of interesting variables (blank space is * inserted if its corresponding bit is not in 'set'). */ #ifdef DEBUG void Compiler::lvaDispVarSet(VARSET_VALARG_TP set) { VARSET_TP allVars(VarSetOps::MakeEmpty(this)); lvaDispVarSet(set, allVars); } void Compiler::lvaDispVarSet(VARSET_VALARG_TP set, VARSET_VALARG_TP allVars) { printf("{"); bool needSpace = false; for (unsigned index = 0; index < lvaTrackedCount; index++) { if (VarSetOps::IsMember(this, set, index)) { unsigned lclNum; LclVarDsc* varDsc; /* Look for the matching variable */ for (lclNum = 0, varDsc = lvaTable; lclNum < lvaCount; lclNum++, varDsc++) { if ((varDsc->lvVarIndex == index) && varDsc->lvTracked) { break; } } if (needSpace) { printf(" "); } else { needSpace = true; } printf("V%02u", lclNum); } else if (VarSetOps::IsMember(this, allVars, index)) { if (needSpace) { printf(" "); } else { needSpace = true; } printf(" "); } } printf("}"); } #endif // DEBUG
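// (Illustrative: for a set containing V00 and V03, lvaDispVarSet prints "{V00 V03}"; tracked variables in 'allVars' but not in 'set' show up as blank columns so successive sets line up.)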
1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call would overwrite stack space that is passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call would overwrite stack space that is passed to the callee.
./src/coreclr/jit/lower.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Lower XX XX XX XX Preconditions: XX XX XX XX Postconditions (for the nodes currently handled): XX XX - All operands requiring a register are explicit in the graph XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "lower.h" #if !defined(TARGET_64BIT) #include "decomposelongs.h" #endif // !defined(TARGET_64BIT) //------------------------------------------------------------------------ // MakeSrcContained: Make "childNode" a contained node // // Arguments: // parentNode - is a non-leaf node that can contain its 'childNode' // childNode - is an op that will now be contained by its parent. // // Notes: // If 'childNode' it has any existing sources, they will now be sources for the parent. // void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode) const { assert(!parentNode->OperIsLeaf()); assert(childNode->canBeContained()); childNode->SetContained(); assert(childNode->isContained()); #ifdef DEBUG if (IsContainableMemoryOp(childNode)) { // Verify caller of this method checked safety. // const bool isSafeToContainMem = IsSafeToContainMem(parentNode, childNode); if (!isSafeToContainMem) { JITDUMP("** Unsafe mem containment of [%06u] in [%06u}, comp->dspTreeID(childNode), " "comp->dspTreeID(parentNode)\n"); assert(isSafeToContainMem); } } #endif } //------------------------------------------------------------------------ // CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate // and, if so, makes it contained. // // Arguments: // parentNode - is any non-leaf node // childNode - is an child op of 'parentNode' // // Return value: // true if we are able to make childNode a contained immediate // bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode) { assert(!parentNode->OperIsLeaf()); // If childNode is a containable immediate if (IsContainableImmed(parentNode, childNode)) { // then make it contained within the parentNode MakeSrcContained(parentNode, childNode); return true; } return false; } //------------------------------------------------------------------------ // IsSafeToContainMem: Checks for conflicts between childNode and parentNode, // and returns 'true' iff memory operand childNode can be contained in parentNode. // // Arguments: // parentNode - any non-leaf node // childNode - some node that is an input to `parentNode` // // Return value: // true if it is safe to make childNode a contained memory operand. 
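// Note (explanatory addition): containing a memory operand folds the load into the parent's instruction (e.g. "add reg, [mem]" on xarch), so the interference walk in the body must prove that nothing between childNode and parentNode can mutate that memory.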
// bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const { // Quick early-out for unary cases // if (childNode->gtNext == parentNode) { return true; } m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, childNode); for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext) { const bool strict = true; if (m_scratchSideEffects.InterferesWith(comp, node, strict)) { return false; } } return true; } //------------------------------------------------------------------------ // IsSafeToContainMem: Checks for conflicts between childNode and grandParentNode // and returns 'true' iff memory operand childNode can be contained in ancestorNode // // Arguments: // grandParentNode - any non-leaf node // parentNode - parent of `childNode` and an input to `grandParentNode` // childNode - some node that is an input to `parentNode` // // Return value: // true if it is safe to make childNode a contained memory operand. // bool Lowering::IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const { m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, childNode); for (GenTree* node = childNode->gtNext; node != grandparentNode; node = node->gtNext) { if (node == parentNode) { continue; } const bool strict = true; if (m_scratchSideEffects.InterferesWith(comp, node, strict)) { return false; } } return true; } //------------------------------------------------------------------------ // LowerNode: this is the main entry point for Lowering. // // Arguments: // node - the node we are lowering. // // Returns: // next node in the transformed node sequence that needs to be lowered. // GenTree* Lowering::LowerNode(GenTree* node) { assert(node != nullptr); switch (node->gtOper) { case GT_NULLCHECK: case GT_IND: LowerIndir(node->AsIndir()); break; case GT_STOREIND: LowerStoreIndirCommon(node->AsStoreInd()); break; case GT_ADD: { GenTree* next = LowerAdd(node->AsOp()); if (next != nullptr) { return next; } } break; #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: #endif case GT_SUB: case GT_AND: case GT_OR: case GT_XOR: return LowerBinaryArithmetic(node->AsOp()); case GT_MUL: case GT_MULHI: #if defined(TARGET_X86) || defined(TARGET_ARM64) case GT_MUL_LONG: #endif return LowerMul(node->AsOp()); case GT_UDIV: case GT_UMOD: if (!LowerUnsignedDivOrMod(node->AsOp())) { ContainCheckDivOrMod(node->AsOp()); } break; case GT_DIV: case GT_MOD: return LowerSignedDivOrMod(node); case GT_SWITCH: return LowerSwitch(node); case GT_CALL: LowerCall(node); break; case GT_LT: case GT_LE: case GT_GT: case GT_GE: case GT_EQ: case GT_NE: case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP: return LowerCompare(node); case GT_JTRUE: return LowerJTrue(node->AsOp()); case GT_JMP: LowerJmpMethod(node); break; case GT_RETURN: LowerRet(node->AsUnOp()); break; case GT_RETURNTRAP: ContainCheckReturnTrap(node->AsOp()); break; case GT_CAST: LowerCast(node); break; #if defined(TARGET_XARCH) || defined(TARGET_ARM64) case GT_BOUNDS_CHECK: ContainCheckBoundsChk(node->AsBoundsChk()); break; #endif // TARGET_XARCH case GT_ARR_ELEM: return LowerArrElem(node); case GT_ARR_OFFSET: ContainCheckArrOffset(node->AsArrOffs()); break; case GT_ROL: case GT_ROR: LowerRotate(node); break; #ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: ContainCheckShiftRotate(node->AsOp()); break; #endif // !TARGET_64BIT case GT_LSH: case GT_RSH: case GT_RSZ: #if defined(TARGET_XARCH) || defined(TARGET_ARM64) LowerShift(node->AsOp()); #else 
ContainCheckShiftRotate(node->AsOp()); #endif break; case GT_STORE_BLK: case GT_STORE_OBJ: if (node->AsBlk()->Data()->IsCall()) { LowerStoreSingleRegCallStruct(node->AsBlk()); break; } FALLTHROUGH; case GT_STORE_DYN_BLK: LowerBlockStoreCommon(node->AsBlk()); break; case GT_LCLHEAP: ContainCheckLclHeap(node->AsOp()); break; #ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; #endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: LowerSIMD(node->AsSIMD()); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: LowerHWIntrinsic(node->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS case GT_LCL_FLD: { // We should only encounter this for lclVars that are lvDoNotEnregister. verifyLclFldDoNotEnregister(node->AsLclVarCommon()->GetLclNum()); break; } case GT_LCL_VAR: { GenTreeLclVar* lclNode = node->AsLclVar(); WidenSIMD12IfNecessary(lclNode); LclVarDsc* varDsc = comp->lvaGetDesc(lclNode); // The consumer of this node must check compatibility of the fields. // This merely checks whether it is possible for this to be a multireg node. if (lclNode->IsMultiRegLclVar()) { if (!varDsc->lvPromoted || (comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT) || (varDsc->lvFieldCnt > MAX_MULTIREG_COUNT)) { lclNode->ClearMultiReg(); if (lclNode->TypeIs(TYP_STRUCT)) { comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp)); } } } break; } case GT_STORE_LCL_VAR: WidenSIMD12IfNecessary(node->AsLclVarCommon()); FALLTHROUGH; case GT_STORE_LCL_FLD: LowerStoreLocCommon(node->AsLclVarCommon()); break; #if defined(TARGET_ARM64) case GT_CMPXCHG: CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand); break; case GT_XORR: case GT_XAND: case GT_XADD: CheckImmedAndMakeContained(node, node->AsOp()->gtOp2); break; #elif defined(TARGET_XARCH) case GT_XORR: case GT_XAND: case GT_XADD: if (node->IsUnusedValue()) { node->ClearUnusedValue(); // Make sure the types are identical, since the node type is changed to VOID // CodeGen relies on op2's type to determine the instruction size. // Note that the node type cannot be a small int but the data operand can. assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet()); node->SetOper(GT_LOCKADD); node->gtType = TYP_VOID; CheckImmedAndMakeContained(node, node->gtGetOp2()); } break; #endif #ifndef TARGET_ARMARCH // TODO-ARMARCH-CQ: We should contain this as long as the offset fits. case GT_OBJ: if (node->AsObj()->Addr()->OperIsLocalAddr()) { node->AsObj()->Addr()->SetContained(); } break; #endif // !TARGET_ARMARCH case GT_KEEPALIVE: node->gtGetOp1()->SetRegOptional(); break; case GT_LCL_FLD_ADDR: case GT_LCL_VAR_ADDR: { const GenTreeLclVarCommon* lclAddr = node->AsLclVarCommon(); const LclVarDsc* varDsc = comp->lvaGetDesc(lclAddr); if (!varDsc->lvDoNotEnregister) { // TODO-Cleanup: this is definitely not the best place for this detection, // but for now it is the easiest. Move it to morph. comp->lvaSetVarDoNotEnregister(lclAddr->GetLclNum() DEBUGARG(DoNotEnregisterReason::LclAddrNode)); } } break; default: break; } return node->gtNext; } /** -- Switch Lowering -- * The main idea of switch lowering is to keep transparency of the register requirements of this node * downstream in LSRA. 
Given that the switch instruction is inherently a control statement which in the JIT * is represented as a simple tree node, at the time we actually generate code for it we end up * generating instructions that actually modify the flow of execution that imposes complicated * register requirement and lifetimes. * * So, for the purpose of LSRA, we want to have a more detailed specification of what a switch node actually * means and more importantly, which and when do we need a register for each instruction we want to issue * to correctly allocate them downstream. * * For this purpose, this procedure performs switch lowering in two different ways: * * a) Represent the switch statement as a zero-index jump table construct. This means that for every destination * of the switch, we will store this destination in an array of addresses and the code generator will issue * a data section where this array will live and will emit code that based on the switch index, will indirect and * jump to the destination specified in the jump table. * * For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch * node for jump table based switches. * The overall structure of a GT_SWITCH_TABLE is: * * GT_SWITCH_TABLE * |_________ localVar (a temporary local that holds the switch index) * |_________ jumpTable (this is a special node that holds the address of the jump table array) * * Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following: * * Input: GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH) * |_____ expr (an arbitrarily complex GT_NODE that represents the switch index) * * This gets transformed into the following statements inside a BBJ_COND basic block (the target would be * the default case of the switch in case the conditional is evaluated to true). * * ----- original block, transformed * GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index) * |_____ expr (the index expression) * * GT_JTRUE * |_____ GT_COND * |_____ GT_GE * |___ Int_Constant (This constant is the index of the default case * that happens to be the highest index in the jump table). * |___ tempLocal (The local variable were we stored the index expression). * * ----- new basic block * GT_SWITCH_TABLE * |_____ tempLocal * |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly * and LinearCodeGen will be responsible to generate downstream). * * This way there are no implicit temporaries. * * b) For small-sized switches, we will actually morph them into a series of conditionals of the form * if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case } * (For the default case conditional, we'll be constructing the exact same code as the jump table case one). * else if (case == firstCase){ goto jumpTable[1]; } * else if (case == secondCase) { goto jumptable[2]; } and so on. * * This transformation is of course made in JIT-IR, not downstream to CodeGen level, so this way we no longer * require internal temporaries to maintain the index we're evaluating plus we're using existing code from * LinearCodeGen to implement this instead of implement all the control flow constructs using InstrDscs and * InstrGroups downstream. 
*/ GenTree* Lowering::LowerSwitch(GenTree* node) { unsigned jumpCnt; unsigned targetCnt; BasicBlock** jumpTab; assert(node->gtOper == GT_SWITCH); // The first step is to build the default case conditional construct that is // shared between both kinds of expansion of the switch node. // To avoid confusion, we'll alias m_block to originalSwitchBB // that represents the node we're morphing. BasicBlock* originalSwitchBB = m_block; LIR::Range& switchBBRange = LIR::AsRange(originalSwitchBB); // jumpCnt is the number of elements in the jump table array. // jumpTab is the actual pointer to the jump table array. // targetCnt is the number of unique targets in the jump table array. jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount; jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab; targetCnt = originalSwitchBB->NumSucc(comp); // GT_SWITCH must be a top-level node with no use. #ifdef DEBUG { LIR::Use use; assert(!switchBBRange.TryGetUse(node, &use)); } #endif JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt); // Handle a degenerate case: if the switch has only a default case, just convert it // to an unconditional branch. This should only happen in minopts or with debuggable // code. if (targetCnt == 1) { JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum); noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { originalSwitchBB->bbJumpKind = BBJ_NONE; originalSwitchBB->bbJumpDest = nullptr; } else { originalSwitchBB->bbJumpKind = BBJ_ALWAYS; originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. for (unsigned i = 1; i < jumpCnt; ++i) { (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB); } // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign // the result of the child subtree to a temp. GenTree* rhs = node->AsOp()->gtOp1; unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable")); comp->lvaTable[lclNum].lvType = rhs->TypeGet(); GenTreeLclVar* store = comp->gtNewStoreLclVar(lclNum, rhs); switchBBRange.InsertAfter(node, store); switchBBRange.Remove(node); return store; } noway_assert(jumpCnt >= 2); // Spill the argument to the switch node into a local so that it can be used later. LIR::Use use(switchBBRange, &(node->AsOp()->gtOp1), node); ReplaceWithLclVar(use); // GT_SWITCH(indexExpression) is now two statements: // 1. a statement containing 'asg' (for temp = indexExpression) // 2. and a statement with GT_SWITCH(temp) assert(node->gtOper == GT_SWITCH); GenTree* temp = node->AsOp()->gtOp1; assert(temp->gtOper == GT_LCL_VAR); unsigned tempLclNum = temp->AsLclVarCommon()->GetLclNum(); var_types tempLclType = temp->TypeGet(); BasicBlock* defaultBB = jumpTab[jumpCnt - 1]; BasicBlock* followingBB = originalSwitchBB->bbNext; /* Is the number of cases right for a test and jump switch? */ const bool fFirstCaseFollows = (followingBB == jumpTab[0]); const bool fDefaultFollows = (followingBB == defaultBB); unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc // This means really just a single cmp/jcc (aka a simple if/else) if (fFirstCaseFollows || fDefaultFollows) { minSwitchTabJumpCnt++; } #if defined(TARGET_ARM) // On ARM for small switch tables we will // generate a sequence of compare and branch instructions // because the code to load the base of the switch // table is huge and hideous due to the relocation... 
:( minSwitchTabJumpCnt += 2; #endif // TARGET_ARM // Once we have the temporary variable, we construct the conditional branch for // the default case. As stated above, this conditional is being shared between // both GT_SWITCH lowering code paths. // This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; } GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType), comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType))); // Make sure we perform an unsigned comparison, just in case the switch index in 'temp' // is now less than zero 0 (that would also hit the default case). gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED; GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond); gtDefaultCaseJump->gtFlags = node->gtFlags; LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump); switchBBRange.InsertAtEnd(std::move(condRange)); BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode()); // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor. // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. assert(originalSwitchBB->bbJumpKind == BBJ_NONE); assert(originalSwitchBB->bbNext == afterDefaultCondBlock); assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH); assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. // The GT_SWITCH code is still in originalSwitchBB (it will be removed later). // Turn originalSwitchBB into a BBJ_COND. originalSwitchBB->bbJumpKind = BBJ_COND; originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1]; // Fix the pred for the default case: the default block target still has originalSwitchBB // as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point // to afterDefaultCondBlock. flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock); comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge); bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt; if (TargetOS::IsUnix && TargetArchitecture::IsArm32) { // Force using an inlined jumping instead switch table generation. // Switch jump table is generated with incorrect values in CoreRT case, // so any large switch will crash after loading to PC any such value. // I think this is due to the fact that we use absolute addressing // instead of relative. But in CoreRT is used as a rule relative // addressing when we generate an executable. // See also https://github.com/dotnet/runtime/issues/8683 // Also https://github.com/dotnet/coreclr/pull/13197 useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI); } // If we originally had 2 unique successors, check to see whether there is a unique // non-default case, in which case we can eliminate the switch altogether. // Note that the single unique successor case is handled above. BasicBlock* uniqueSucc = nullptr; if (targetCnt == 2) { uniqueSucc = jumpTab[0]; noway_assert(jumpCnt >= 2); for (unsigned i = 1; i < jumpCnt - 1; i++) { if (jumpTab[i] != uniqueSucc) { uniqueSucc = nullptr; break; } } } if (uniqueSucc != nullptr) { // If the unique successor immediately follows this block, we have nothing to do - // it will simply fall-through after we remove the switch, below. // Otherwise, make this a BBJ_ALWAYS. 
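// (This arises, for instance, when every non-default case of the original switch shares one target block.)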
// Now, fixup the predecessor links to uniqueSucc. In the original jumpTab: // jumpTab[i-1] was the default target, which we handled above, // jumpTab[0] is the first target, and we'll leave that predecessor link. // Remove any additional predecessor links to uniqueSucc. for (unsigned i = 1; i < jumpCnt - 1; ++i) { assert(jumpTab[i] == uniqueSucc); (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock); } if (afterDefaultCondBlock->bbNext == uniqueSucc) { afterDefaultCondBlock->bbJumpKind = BBJ_NONE; afterDefaultCondBlock->bbJumpDest = nullptr; } else { afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS; afterDefaultCondBlock->bbJumpDest = uniqueSucc; } } // If the number of possible destinations is small enough, we proceed to expand the switch // into a series of conditional branches, otherwise we follow the jump table based switch // transformation. else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50)) { // Lower the switch into a series of compare and branch IR trees. // // In this case we will morph the node in the following way: // 1. Generate a JTRUE statement to evaluate the default case. (This happens above.) // 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain // a statement that is responsible for performing a comparison of the table index and conditional // branch if equal. JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum); // We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new // blocks. If we end up not needing it at all (say, if all the non-default cases just fall through), // we'll delete it. bool fUsedAfterDefaultCondBlock = false; BasicBlock* currentBlock = afterDefaultCondBlock; LIR::Range* currentBBRange = &LIR::AsRange(currentBlock); // Walk to entries 0 to jumpCnt - 1. If a case target follows, ignore it and let it fall through. // If no case target follows, the last one doesn't need to be a compare/branch: it can be an // unconditional branch. bool fAnyTargetFollows = false; for (unsigned i = 0; i < jumpCnt - 1; ++i) { assert(currentBlock != nullptr); // Remove the switch from the predecessor list of this case target's block. // We'll add the proper new predecessor edge later. flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock); if (jumpTab[i] == followingBB) { // This case label follows the switch; let it fall through. fAnyTargetFollows = true; continue; } // We need a block to put in the new compare and/or branch. // If we haven't used the afterDefaultCondBlock yet, then use that. if (fUsedAfterDefaultCondBlock) { BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true); comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor. currentBlock = newBlock; currentBBRange = &LIR::AsRange(currentBlock); } else { assert(currentBlock == afterDefaultCondBlock); fUsedAfterDefaultCondBlock = true; } // We're going to have a branch, either a conditional or unconditional, // to the target. Set the target. currentBlock->bbJumpDest = jumpTab[i]; // Wire up the predecessor list for the "branch" case. 
comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge); if (!fAnyTargetFollows && (i == jumpCnt - 2)) { // We're processing the last one, and there is no fall through from any case // to the following block, so we can use an unconditional branch to the final // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). currentBlock->bbJumpKind = BBJ_ALWAYS; } else { // Otherwise, it's a conditional branch. Set the branch kind, then add the // condition statement. currentBlock->bbJumpKind = BBJ_COND; // Now, build the conditional statement for the current case that is // being evaluated: // GT_JTRUE // |__ GT_COND // |____GT_EQ // |____ (switchIndex) (The temp variable) // |____ (ICon) (The actual case constant) GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType), comp->gtNewIconNode(i, tempLclType)); GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond); LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch); currentBBRange->InsertAtEnd(std::move(caseRange)); } } if (fAnyTargetFollows) { // There is a fall-through to the following block. In the loop // above, we deleted all the predecessor edges from the switch. // In this case, we need to add one back. comp->fgAddRefPred(currentBlock->bbNext, currentBlock); } if (!fUsedAfterDefaultCondBlock) { // All the cases were fall-through! We don't need this block. // Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); assert(currentBlock->bbJumpKind == BBJ_SWITCH); currentBlock->bbJumpKind = BBJ_NONE; currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } } else { // At this point the default case has already been handled and we need to generate a jump // table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both // switch variants need the switch value so create the necessary LclVar node here. GenTree* switchValue = comp->gtNewLclvNode(tempLclNum, tempLclType); LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock); switchBlockRange.InsertAtEnd(switchValue); // Try generating a bit test based switch first, // if that's not possible a jump table based switch will be generated. if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue)) { JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum); #ifdef TARGET_64BIT if (tempLclType != TYP_I_IMPL) { // SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL. // Note that the switch value is unsigned so the cast should be unsigned as well. switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL); switchBlockRange.InsertAtEnd(switchValue); } #endif GenTree* switchTable = comp->gtNewJmpTableNode(); GenTree* switchJump = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable); switchBlockRange.InsertAfter(switchValue, switchTable, switchJump); // this block no longer branches to the default block afterDefaultCondBlock->bbJumpSwt->removeDefault(); } comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock); } GenTree* next = node->gtNext; // Get rid of the GT_SWITCH(temp). 
switchBBRange.Remove(node->AsOp()->gtOp1); switchBBRange.Remove(node); return next; } //------------------------------------------------------------------------ // TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test. // // Arguments: // jumpTable - The jump table // jumpCount - The number of blocks in the jump table // targetCount - The number of distinct blocks in the jump table // bbSwitch - The switch block // switchValue - A LclVar node that provides the switch value // // Return value: // true if the switch has been lowered to a bit test // // Notes: // If the jump table contains less than 32 (64 on 64 bit targets) entries and there // are at most 2 distinct jump targets then the jump table can be converted to a word // of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the // other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump // to the appropriate target: // mov eax, 245 ; jump table converted to a "bit table" // bt eax, ebx ; ebx is supposed to contain the switch value // jc target1 // target0: // ... // target1: // Such code is both shorter and faster (in part due to the removal of a memory load) // than the traditional jump table base code. And of course, it also avoids the need // to emit the jump table itself that can reach up to 256 bytes (for 64 entries). // bool Lowering::TryLowerSwitchToBitTest( BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue) { #ifndef TARGET_XARCH // Other architectures may use this if they substitute GT_BT with equivalent code. return false; #else assert(jumpCount >= 2); assert(targetCount >= 2); assert(bbSwitch->bbJumpKind == BBJ_SWITCH); assert(switchValue->OperIs(GT_LCL_VAR)); // // Quick check to see if it's worth going through the jump table. The bit test switch supports // up to 2 targets but targetCount also includes the default block so we need to allow 3 targets. // We'll ensure that there are only 2 targets when building the bit table. // if (targetCount > 3) { return false; } // // The number of bits in the bit table is the same as the number of jump table entries. But the // jump table also includes the default target (at the end) so we need to ignore it. The default // has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates. // const unsigned bitCount = jumpCount - 1; if (bitCount > (genTypeSize(TYP_I_IMPL) * 8)) { return false; } // // Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to // bbCase1. Simply use the first block in the jump table as bbCase1, later we can invert the bit // table and/or swap the blocks if it's beneficial. // BasicBlock* bbCase0 = nullptr; BasicBlock* bbCase1 = jumpTable[0]; size_t bitTable = 1; for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++) { if (jumpTable[bitIndex] == bbCase1) { bitTable |= (size_t(1) << bitIndex); } else if (bbCase0 == nullptr) { bbCase0 = jumpTable[bitIndex]; } else if (jumpTable[bitIndex] != bbCase0) { // If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more // than 3 because of the check at the start of the function. assert(targetCount == 3); return false; } } // // One of the case blocks has to follow the switch block. This requirement could be avoided // by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively // impacts register allocation. 
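// Illustrative recap of the bit table built above: with jumpTable = {B1, B0, B0, B1, default}, bitCount is 4 and bitTable ends up 0b1001, so switch values 0 and 3 branch to bbCase1 (B1) while values 1 and 2 go to bbCase0 (B0).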
// if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1)) { return false; } #ifdef TARGET_64BIT // // See if we can avoid a 8 byte immediate on 64 bit targets. If all upper 32 bits are 1 // then inverting the bit table will make them 0 so that the table now fits in 32 bits. // Note that this does not change the number of bits in the bit table, it just takes // advantage of the fact that loading a 32 bit immediate into a 64 bit register zero // extends the immediate value to 64 bit. // if (~bitTable <= UINT32_MAX) { bitTable = ~bitTable; std::swap(bbCase0, bbCase1); } #endif // // Rewire the blocks as needed and figure out the condition to use for JCC. // GenCondition bbSwitchCondition; bbSwitch->bbJumpKind = BBJ_COND; comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); if (bbSwitch->bbNext == bbCase0) { // GenCondition::C generates JC so we jump to bbCase1 when the bit is set bbSwitchCondition = GenCondition::C; bbSwitch->bbJumpDest = bbCase1; comp->fgAddRefPred(bbCase0, bbSwitch); comp->fgAddRefPred(bbCase1, bbSwitch); } else { assert(bbSwitch->bbNext == bbCase1); // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set bbSwitchCondition = GenCondition::NC; bbSwitch->bbJumpDest = bbCase0; comp->fgAddRefPred(bbCase0, bbSwitch); comp->fgAddRefPred(bbCase1, bbSwitch); } // // Append BT(bitTable, switchValue) and JCC(condition) to the switch block. // var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG; GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType); GenTree* bitTest = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue); bitTest->gtFlags |= GTF_SET_FLAGS; GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition); jcc->gtFlags |= GTF_USE_FLAGS; LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc); return true; #endif // TARGET_XARCH } // NOTE: this method deliberately does not update the call arg table. It must only // be used by NewPutArg and LowerArg; these functions are responsible for updating // the call arg table as necessary. void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast) { assert(argSlot != nullptr); assert(*argSlot != nullptr); assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST)); GenTree* arg = *argSlot; // Replace the argument with the putarg/copy *argSlot = putArgOrBitcast; putArgOrBitcast->AsOp()->gtOp1 = arg; // Insert the putarg/copy into the block BlockRange().InsertAfter(arg, putArgOrBitcast); } //------------------------------------------------------------------------ // NewPutArg: rewrites the tree to put an arg in a register or on the stack. // // Arguments: // call - the call whose arg is being rewritten. // arg - the arg being rewritten. // info - the fgArgTabEntry information for the argument. // type - the type of the argument. // // Return Value: // The new tree that was created to put the arg in the right place // or the incoming arg if the arg tree was not rewritten. // // Assumptions: // call, arg, and info must be non-null. // // Notes: // For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined) // this method allocates a single GT_PUTARG_REG for 1 eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs // for two eightbyte structs. // // For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing // (i.e. 
UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers // layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value. // (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.) // GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type) { assert(call != nullptr); assert(arg != nullptr); assert(info != nullptr); GenTree* putArg = nullptr; bool isOnStack = (info->GetRegNum() == REG_STK); #ifdef TARGET_ARMARCH // Mark contained when we pass struct // GT_FIELD_LIST is always marked contained when it is generated if (type == TYP_STRUCT) { arg->SetContained(); if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR)) { MakeSrcContained(arg, arg->AsObj()->Addr()); } } #endif #if FEATURE_ARG_SPLIT // Struct can be split into register(s) and stack on ARM if (compFeatureArgSplit() && info->IsSplit()) { assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST); // TODO: Need to check correctness for FastTailCall if (call->IsFastTailCall()) { #ifdef TARGET_ARM NYI_ARM("lower: struct argument by fast tail call"); #endif // TARGET_ARM } const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE; DEBUG_ARG_SLOTS_ASSERT(slotNumber == info->slotNum); const bool putInIncomingArgArea = call->IsFastTailCall(); putArg = new (comp, GT_PUTARG_SPLIT) GenTreePutArgSplit(arg, info->GetByteOffset(), #if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(), #elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK) slotNumber, #elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), #endif info->numRegs, call, putInIncomingArgArea); // If struct argument is morphed to GT_FIELD_LIST node(s), // we can know GC info by type of each GT_FIELD_LIST node. // So we skip setting GC Pointer info. // GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit(); for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++) { argSplit->SetRegNumByIdx(info->GetRegNum(regIndex), regIndex); } if (arg->OperGet() == GT_OBJ) { ClassLayout* layout = arg->AsObj()->GetLayout(); // Set type of registers for (unsigned index = 0; index < info->numRegs; index++) { argSplit->m_regType[index] = layout->GetGCPtrType(index); } } else { unsigned regIndex = 0; for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses()) { if (regIndex >= info->numRegs) { break; } var_types regType = use.GetNode()->TypeGet(); // Account for the possibility that float fields may be passed in integer registers. if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(regIndex))) { regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG; } argSplit->m_regType[regIndex] = regType; regIndex++; } // Clear the register assignment on the fieldList node, as these are contained. 
arg->SetRegNum(REG_NA); } } else #endif // FEATURE_ARG_SPLIT { if (!isOnStack) { #if FEATURE_MULTIREG_ARGS if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST)) { unsigned int regIndex = 0; for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses()) { regNumber argReg = info->GetRegNum(regIndex); GenTree* curOp = use.GetNode(); var_types curTyp = curOp->TypeGet(); // Create a new GT_PUTARG_REG node with op1 GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg); // Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), newOper); regIndex++; } // Just return arg. The GT_FIELD_LIST is not replaced. // Nothing more to do. return arg; } else #endif // FEATURE_MULTIREG_ARGS { putArg = comp->gtNewPutArgReg(type, arg, info->GetRegNum()); } } else { // Mark this one as tail call arg if it is a fast tail call. // This provides the info to put this argument in in-coming arg area slot // instead of in out-going arg area slot. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG // Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce // a result. So the type of its operand must be the correct type to push on the stack. // For a FIELD_LIST, this will be the type of the field (not the type of the arg), // but otherwise it is generally the type of the operand. info->checkIsStruct(); #endif if ((arg->OperGet() != GT_FIELD_LIST)) { #if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK) if (type == TYP_SIMD12) { #if !defined(TARGET_64BIT) assert(info->GetByteSize() == 12); #else // TARGET_64BIT if (compMacOsArm64Abi()) { assert(info->GetByteSize() == 12); } else { assert(info->GetByteSize() == 16); } #endif // TARGET_64BIT } else #endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK) { assert(genActualType(arg->TypeGet()) == type); } } const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE; const bool putInIncomingArgArea = call->IsFastTailCall(); putArg = new (comp, GT_PUTARG_STK) GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg, info->GetByteOffset(), #if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(), #elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK) slotNumber, #elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK) info->GetStackByteSize(), #endif call, putInIncomingArgArea); #ifdef FEATURE_PUT_STRUCT_ARG_STK // If the ArgTabEntry indicates that this arg is a struct // get and store the number of slots that are references. // This is later used in the codegen for PUT_ARG_STK implementation // for struct to decide whether and how many single eight-byte copies // to be done (only for reference slots), so gcinfo is emitted. // For non-reference slots faster/smaller size instructions are used - // pair copying using XMM registers or rep mov instructions. if (info->isStruct) { // We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments. if (arg->OperIsLocal()) { // This must have a type with a known size (SIMD or has been morphed to a primitive type). assert(arg->TypeGet() != TYP_STRUCT); } else if (arg->OperIs(GT_OBJ)) { assert(!varTypeIsSIMD(arg)); #ifdef TARGET_X86 // On x86 VM lies about the type of a struct containing a pointer sized // integer field by returning the type of its field as the type of struct. // Such struct can be passed in a register depending its position in // parameter list. 
VM does this unwrapping only one level and therefore // a type like Struct Foo { Struct Bar { int f}} awlays needs to be // passed on stack. Also, VM doesn't lie about type of such a struct // when it is a field of another struct. That is VM doesn't lie about // the type of Foo.Bar // // We now support the promotion of fields that are of type struct. // However we only support a limited case where the struct field has a // single field and that single field must be a scalar type. Say Foo.Bar // field is getting passed as a parameter to a call, Since it is a TYP_STRUCT, // as per x86 ABI it should always be passed on stack. Therefore GenTree // node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where // local v1 could be a promoted field standing for Foo.Bar. Note that // the type of v1 will be the type of field of Foo.Bar.f when Foo is // promoted. That is v1 will be a scalar type. In this case we need to // pass v1 on stack instead of in a register. // // TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is // a scalar type and the width of GT_OBJ matches the type size of v1. // Note that this cannot be done till call node arguments are morphed // because we should not lose the fact that the type of argument is // a struct so that the arg gets correctly marked to be passed on stack. GenTree* objOp1 = arg->gtGetOp1(); if (objOp1->OperGet() == GT_LCL_VAR_ADDR) { unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum(); if (comp->lvaTable[lclNum].lvType != TYP_STRUCT) { comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr)); } } #endif // TARGET_X86 } else if (!arg->OperIs(GT_FIELD_LIST)) { #ifdef TARGET_ARM assert((info->GetStackSlotsNumber() == 1) || ((arg->TypeGet() == TYP_DOUBLE) && (info->GetStackSlotsNumber() == 2))); #else assert(varTypeIsSIMD(arg) || (info->GetStackSlotsNumber() == 1)); #endif } } #endif // FEATURE_PUT_STRUCT_ARG_STK } } JITDUMP("new node is : "); DISPNODE(putArg); JITDUMP("\n"); if (arg->gtFlags & GTF_LATE_ARG) { putArg->gtFlags |= GTF_LATE_ARG; } return putArg; } //------------------------------------------------------------------------ // LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between // the argument evaluation and the call. This is the point at which the source is // consumed and the value transitions from control of the register allocator to the calling // convention. // // Arguments: // call - The call node // ppArg - Pointer to the call argument pointer. We might replace the call argument by // changing *ppArg. // // Return Value: // None. // void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg) { GenTree* arg = *ppArg; JITDUMP("lowering arg : "); DISPNODE(arg); // No assignments should remain by Lowering. assert(!arg->OperIs(GT_ASG)); assert(!arg->OperIsPutArgStk()); // Assignments/stores at this level are not really placing an argument. // They are setting up temporary locals that will later be placed into // outgoing regs or stack. // Note that atomic ops may be stores and still produce a value. 
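// (For example, a GT_XADD used as an argument both updates its memory operand and produces the original value, so it passes the IsValue() check below and gets a putarg like any other value.)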
if (!arg->IsValue()) { assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() || arg->OperIsCopyBlkOp()); return; } fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg); assert(info->GetNode() == arg); var_types type = arg->TypeGet(); if (varTypeIsSmall(type)) { // Normalize 'type', it represents the item that we will be storing in the Outgoing Args type = TYP_INT; } #if defined(FEATURE_SIMD) #if defined(TARGET_X86) // Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their // allocated size (see lvSize()). However, when passing the variables as arguments, and // storing the variables to the outgoing argument area on the stack, we must use their // actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written. if (type == TYP_SIMD16) { if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR)) { const LclVarDsc* varDsc = comp->lvaGetDesc(arg->AsLclVarCommon()); type = varDsc->lvType; } else if (arg->OperIs(GT_SIMD, GT_HWINTRINSIC)) { GenTreeJitIntrinsic* jitIntrinsic = reinterpret_cast<GenTreeJitIntrinsic*>(arg); // For HWIntrinsic, there are some intrinsics like ExtractVector128 which have // a gtType of TYP_SIMD16 but a SimdSize of 32, so we need to include that in // the assert below. assert((jitIntrinsic->GetSimdSize() == 12) || (jitIntrinsic->GetSimdSize() == 16) || (jitIntrinsic->GetSimdSize() == 32)); if (jitIntrinsic->GetSimdSize() == 12) { type = TYP_SIMD12; } } } #elif defined(TARGET_AMD64) // TYP_SIMD8 parameters that are passed as longs if (type == TYP_SIMD8 && genIsValidIntReg(info->GetRegNum())) { GenTree* bitcast = comp->gtNewBitCastNode(TYP_LONG, arg); BlockRange().InsertAfter(arg, bitcast); *ppArg = arg = bitcast; assert(info->GetNode() == arg); type = TYP_LONG; } #endif // defined(TARGET_X86) #endif // defined(FEATURE_SIMD) // If we hit this we are probably double-lowering. assert(!arg->OperIsPutArg()); #if !defined(TARGET_64BIT) if (varTypeIsLong(type)) { noway_assert(arg->OperIs(GT_LONG)); GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp1(), 0, TYP_INT); fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp2(), 4, TYP_INT); GenTree* newArg = NewPutArg(call, fieldList, info, type); if (info->GetRegNum() != REG_STK) { assert(info->numRegs == 2); // In the register argument case, NewPutArg replaces the original field list args with new // GT_PUTARG_REG nodes, inserts them in linear order and returns the field list. So the // only thing left to do is to insert the field list itself in linear order. assert(newArg == fieldList); BlockRange().InsertBefore(arg, newArg); } else { // For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK. // Although the hi argument needs to be pushed first, that will be handled by the general case, // in which the fields will be reversed. assert(info->numSlots == 2); newArg->SetRegNum(REG_STK); BlockRange().InsertBefore(arg, fieldList, newArg); } *ppArg = newArg; assert(info->GetNode() == newArg); BlockRange().Remove(arg); } else #endif // !defined(TARGET_64BIT) { #ifdef TARGET_ARMARCH if (call->IsVarargs() || comp->opts.compUseSoftFP) { // For vararg call or on armel, reg args should be all integer. // Insert copies as needed to move float value to integer register. 
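// For example (illustrative): under soft-FP, a TYP_DOUBLE argument headed for r0/r1
// is rewritten by LowerFloatArg/LowerFloatArgReg below into BITCAST<long>(arg),
// so the value flows through the integer registers instead of a VFP register.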
GenTree* newNode = LowerFloatArg(ppArg, info); if (newNode != nullptr) { type = newNode->TypeGet(); } } #endif // TARGET_ARMARCH GenTree* putArg = NewPutArg(call, arg, info, type); // In the case of register passable struct (in one or two registers) // the NewPutArg returns a new node (GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs.) // If an extra node is returned, splice it in the right place in the tree. if (arg != putArg) { ReplaceArgWithPutArgOrBitcast(ppArg, putArg); } } } #ifdef TARGET_ARMARCH //------------------------------------------------------------------------ // LowerFloatArg: Lower float call arguments on the arm platform. // // Arguments: // arg - The arg node // info - call argument info // // Return Value: // Return nullptr if no transformation was done; // return arg if there was an in-place transformation; // return a new tree if the root was changed. // // Notes: // This must handle scalar float arguments as well as GT_FIELD_LISTs // with floating point fields. // GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info) { GenTree* arg = *pArg; if (info->GetRegNum() != REG_STK) { if (arg->OperIs(GT_FIELD_LIST)) { // Transform fields that are passed in registers, in place. regNumber currRegNumber = info->GetRegNum(); unsigned regIndex = 0; for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses()) { if (regIndex >= info->numRegs) { break; } GenTree* node = use.GetNode(); if (varTypeIsFloating(node)) { GenTree* intNode = LowerFloatArgReg(node, currRegNumber); assert(intNode != nullptr); ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), intNode); } if (node->TypeGet() == TYP_DOUBLE) { currRegNumber = REG_NEXT(REG_NEXT(currRegNumber)); regIndex += 2; } else { currRegNumber = REG_NEXT(currRegNumber); regIndex += 1; } } // List fields were replaced in place. return arg; } else if (varTypeIsFloating(arg)) { GenTree* intNode = LowerFloatArgReg(arg, info->GetRegNum()); assert(intNode != nullptr); ReplaceArgWithPutArgOrBitcast(pArg, intNode); return *pArg; } } return nullptr; } //------------------------------------------------------------------------ // LowerFloatArgReg: Lower the float call argument node that is passed via register. // // Arguments: // arg - The arg node // regNum - register number // // Return Value: // Return a new bitcast node that moves the float to an int register. // GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum) { var_types floatType = arg->TypeGet(); assert(varTypeIsFloating(floatType)); var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT; GenTree* intArg = comp->gtNewBitCastNode(intType, arg); intArg->SetRegNum(regNum); #ifdef TARGET_ARM if (floatType == TYP_DOUBLE) { // A special case when we introduce TYP_LONG // during lowering for arm32 softFP to pass double // in int registers. 
assert(comp->opts.compUseSoftFP); regNumber nextReg = REG_NEXT(regNum); intArg->AsMultiRegOp()->gtOtherReg = nextReg; } #endif return intArg; } #endif // do lowering steps for each arg of a call void Lowering::LowerArgsForCall(GenTreeCall* call) { JITDUMP("objp:\n======\n"); if (call->gtCallThisArg != nullptr) { LowerArg(call, &call->gtCallThisArg->NodeRef()); } JITDUMP("\nargs:\n======\n"); for (GenTreeCall::Use& use : call->Args()) { LowerArg(call, &use.NodeRef()); } JITDUMP("\nlate:\n======\n"); for (GenTreeCall::Use& use : call->LateArgs()) { LowerArg(call, &use.NodeRef()); } } // helper that creates a node representing a relocatable physical address computation GenTree* Lowering::AddrGen(ssize_t addr) { // this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr) GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR); return result; } // variant that takes a void* GenTree* Lowering::AddrGen(void* addr) { return AddrGen((ssize_t)addr); } // do lowering steps for a call // this includes: // - adding the placement nodes (either stack or register variety) for arguments // - lowering the expression that calculates the target address // - adding nodes for other operations that occur after the call sequence starts and before // control transfer occurs (profiling and tail call helpers, pinvoke incantations) // void Lowering::LowerCall(GenTree* node) { GenTreeCall* call = node->AsCall(); JITDUMP("lowering call (before):\n"); DISPTREERANGE(BlockRange(), call); JITDUMP("\n"); call->ClearOtherRegs(); LowerArgsForCall(call); // note that everything generated from this point might run AFTER the outgoing args are placed GenTree* controlExpr = nullptr; bool callWasExpandedEarly = false; // for x86, this is where we record ESP for checking later to make sure stack is balanced // Check for Delegate.Invoke(). If so, we inline it. We get the // target-object and target-function from the delegate-object, and do // an indirect call. if (call->IsDelegateInvoke()) { controlExpr = LowerDelegateInvoke(call); } else { // Virtual and interface calls switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK) { case GTF_CALL_VIRT_STUB: controlExpr = LowerVirtualStubCall(call); break; case GTF_CALL_VIRT_VTABLE: assert(call->IsVirtualVtable()); if (!call->IsExpandedEarly()) { assert(call->gtControlExpr == nullptr); controlExpr = LowerVirtualVtableCall(call); } else { callWasExpandedEarly = true; controlExpr = call->gtControlExpr; } break; case GTF_CALL_NONVIRT: if (call->IsUnmanaged()) { controlExpr = LowerNonvirtPinvokeCall(call); } else if (call->gtCallType == CT_INDIRECT) { controlExpr = LowerIndirectNonvirtCall(call); } else { controlExpr = LowerDirectCall(call); } break; default: noway_assert(!"strange call type"); break; } } // Indirect calls should always go through GenTreeCall::gtCallAddr and // should never have a control expression as well. assert((call->gtCallType != CT_INDIRECT) || (controlExpr == nullptr)); if (call->IsTailCallViaJitHelper()) { // Either controlExpr or gtCallAddr must contain real call target. 
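// (Illustrative note: for CT_INDIRECT calls the real target was evaluated into
// gtCallAddr rather than into a control expression, hence the fallback below.)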
if (controlExpr == nullptr) { assert(call->gtCallType == CT_INDIRECT); assert(call->gtCallAddr != nullptr); controlExpr = call->gtCallAddr; } controlExpr = LowerTailCallViaJitHelper(call, controlExpr); } // Check if we need to thread a newly created controlExpr into the LIR // if ((controlExpr != nullptr) && !callWasExpandedEarly) { LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr); JITDUMP("results of lowering call:\n"); DISPRANGE(controlExprRange); ContainCheckRange(controlExprRange); BlockRange().InsertBefore(call, std::move(controlExprRange)); call->gtControlExpr = controlExpr; } if (comp->opts.IsCFGEnabled()) { LowerCFGCall(call); } if (call->IsFastTailCall()) { // Lower fast tail call can introduce new temps to set up args correctly for Callee. // This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding Caller stack args // and replacing them with a new temp. Control expr also can contain nodes that need // to be patched. // Therefore lower fast tail call must be done after controlExpr is inserted into LIR. // There is one side effect which is flipping the order of PME and control expression // since LowerFastTailCall calls InsertPInvokeMethodEpilog. LowerFastTailCall(call); } if (varTypeIsStruct(call)) { LowerCallStruct(call); } ContainCheckCallOperands(call); JITDUMP("lowering call (after):\n"); DISPTREERANGE(BlockRange(), call); JITDUMP("\n"); } // Inserts profiler hook, GT_PROF_HOOK for a tail call node. // // AMD64: // We need to insert this after all nested calls, but before all the arguments to this call have been set up. // To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before // that. If there are no args, then it should be inserted before the call node. // // For example: // * stmtExpr void (top level) (IL 0x000...0x010) // arg0 SETUP | /--* argPlace ref REG NA $c5 // this in rcx | | /--* argPlace ref REG NA $c1 // | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2 // arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2 // | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2 // arg1 in rdx | | +--* putarg_reg ref REG NA // | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80 // this in rcx | | +--* putarg_reg ref REG NA // | | /--* call nullcheck ref System.String.ToLower $c5 // | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? ) // | | { \--* prof_hook void REG NA // arg0 in rcx | +--* putarg_reg ref REG NA // control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA // \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void // // In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call // (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call. // // X86: // Insert the profiler hook immediately before the call. The profiler hook will preserve // all argument registers (ECX, EDX), but nothing else. // // Params: // callNode - tail call node // insertionPoint - if non-null, insert the profiler hook before this point. // If null, insert the profiler hook before args are setup // but after all arg side effects are computed. 
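//    (On x86 the hook is simply inserted right before the call node itself; the
//    arg-scanning logic below only applies to the other targets.)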
// void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint) { assert(call->IsTailCall()); assert(comp->compIsProfilerHookNeeded()); #if defined(TARGET_X86) if (insertionPoint == nullptr) { insertionPoint = call; } #else // !defined(TARGET_X86) if (insertionPoint == nullptr) { for (GenTreeCall::Use& use : call->Args()) { assert(!use.GetNode()->OperIs(GT_PUTARG_REG)); // We don't expect to see these in gtCallArgs if (use.GetNode()->OperIs(GT_PUTARG_STK)) { // found it insertionPoint = use.GetNode(); break; } } if (insertionPoint == nullptr) { for (GenTreeCall::Use& use : call->LateArgs()) { if (use.GetNode()->OperIs(GT_PUTARG_REG, GT_PUTARG_STK)) { // found it insertionPoint = use.GetNode(); break; } } // If there are no args, insert before the call node if (insertionPoint == nullptr) { insertionPoint = call; } } } #endif // !defined(TARGET_X86) assert(insertionPoint != nullptr); GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID); BlockRange().InsertBefore(insertionPoint, profHookNode); } //------------------------------------------------------------------------ // LowerFastTailCall: Lower a call node dispatched as a fast tailcall (epilog + // jmp). // // Arguments: // call - the call node that is being dispatched as a fast tailcall. // // Assumptions: // call must be non-null. // // Notes: // For fast tail calls it is necessary to set up stack args in the incoming // arg stack space area. When args passed also come from this area we may // run into problems because we may end up overwriting the stack slot before // using it. For example, for foo(a, b) { return bar(b, a); }, if a and b // are on incoming arg stack space in foo they need to be swapped in this // area for the call to bar. This function detects this situation and // introduces a temp when an outgoing argument would overwrite a later-used // incoming argument. // // This function also handles inserting necessary profiler hooks and pinvoke // method epilogs in case there are inlined pinvokes. void Lowering::LowerFastTailCall(GenTreeCall* call) { #if FEATURE_FASTTAILCALL // Tail call restrictions i.e. conditions under which tail prefix is ignored. // Most of these checks are already done by importer or fgMorphTailCall(). // This serves as a double sanity check. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods assert(!comp->opts.IsReversePInvoke()); // tail calls from reverse pinvoke methods assert(!call->IsUnmanaged()); // tail calls to unmanaged methods assert(!comp->compLocallocUsed); // tail call from methods that also do localloc #ifdef TARGET_AMD64 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check #endif // TARGET_AMD64 // We expect to see a call that meets the following conditions assert(call->IsFastTailCall()); // The VM cannot use return address hijacking when A() and B() tail call each // other in mutual recursion. Therefore, this block is reachable through // a GC-safe point or the whole method is marked as fully interruptible. // // TODO-Cleanup: // optReachWithoutCall() depends on the fact that loop header blocks // will have a block number > fgLastBB. These loop headers get added // after dominator computation and get skipped by OptReachWithoutCall(). // The below condition cannot be asserted in lower because fgSimpleLowering() // can add a new basic block for range check failure which becomes // fgLastBB with block number > loop header block number. 
// assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || // !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->GetInterruptible()); // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that // a method returns. This handles the case where the caller method has both PInvokes and tail calls. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call)); } // Args for a tail call are set up in the incoming arg area. The gc-ness of args of // caller and callee (which is being tail called) may not match. Therefore, everything // from arg setup until the epilog needs to be non-interruptible by GC. This is // achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node // of the call is set up. Note that once a stack arg is set up, it cannot have nested // calls subsequently in execution order to set up other args, because the nested // call could over-write the stack arg that was set up earlier. ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack)); for (GenTreeCall::Use& use : call->Args()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { putargs.Push(use.GetNode()); } } for (GenTreeCall::Use& use : call->LateArgs()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { putargs.Push(use.GetNode()); } } GenTree* startNonGCNode = nullptr; if (!putargs.Empty()) { // Get the earliest operand of the first PUTARG_STK node. We will make // the required copies of args before this node. bool unused; GenTree* insertionPoint = BlockRange().GetTreeRange(putargs.Bottom(), &unused).FirstNode(); // Insert GT_START_NONGC node before we evaluate the PUTARG_STK args. // Note that if there are no args to be set up on stack, no need to // insert GT_START_NONGC node. startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID); BlockRange().InsertBefore(insertionPoint, startNonGCNode); // GC interruptibility in the following case: // foo(a, b, c, d, e) { bar(a, b, c, d, e); } // bar(a, b, c, d, e) { foo(a, b, c, d, e); } // // Since the instruction group starting from the instruction that sets up the first // stack arg to the end of the tail call is marked as non-gc interruptible, // this will form a non-interruptible tight loop causing gc-starvation. To fix // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method // has a single basic block and is not a GC-safe point. The presence of a single // nop outside non-gc interruptible region will prevent gc starvation. if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT)) { assert(comp->fgFirstBB == comp->compCurBB); GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); BlockRange().InsertBefore(startNonGCNode, noOp); } // Since this is a fast tailcall each PUTARG_STK will place the argument in the // _incoming_ arg space area. This will effectively overwrite our already existing // incoming args that live in that area. If we have later uses of those args, this // is a problem. We introduce a defensive copy into a temp here of those args that // potentially may cause problems. 
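        // Illustrative sketch of the overlap test used below: an incoming stack arg
        // occupying bytes [argStart, argEnd) is only safe if the PUTARG_STK write to
        // [overwrittenStart, overwrittenEnd) is disjoint from it; any intersection
        // means the arg could be clobbered before a later use, so it is rehomed to a temp.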
for (int i = 0; i < putargs.Height(); i++) { GenTreePutArgStk* put = putargs.Bottom(i)->AsPutArgStk(); unsigned int overwrittenStart = put->getArgOffset(); unsigned int overwrittenEnd = overwrittenStart + put->GetStackByteSize(); int baseOff = -1; // Stack offset of first arg on stack for (unsigned callerArgLclNum = 0; callerArgLclNum < comp->info.compArgsCount; callerArgLclNum++) { LclVarDsc* callerArgDsc = comp->lvaGetDesc(callerArgLclNum); if (callerArgDsc->lvIsRegArg) { continue; } unsigned int argStart; unsigned int argEnd; #if defined(TARGET_AMD64) if (TargetOS::IsWindows) { // On Windows x64, the argument position determines the stack slot uniquely, and even the // register args take up space in the stack frame (shadow space). argStart = callerArgLclNum * TARGET_POINTER_SIZE; argEnd = argStart + static_cast<unsigned int>(callerArgDsc->lvArgStackSize()); } else #endif // TARGET_AMD64 { assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS); if (baseOff == -1) { baseOff = callerArgDsc->GetStackOffset(); } // On all ABIs where we fast tail call the stack args should come in order. assert(baseOff <= callerArgDsc->GetStackOffset()); // Compute offset of this stack argument relative to the first stack arg. // This will be its offset into the incoming arg space area. argStart = static_cast<unsigned int>(callerArgDsc->GetStackOffset() - baseOff); argEnd = argStart + comp->lvaLclSize(callerArgLclNum); } // If ranges do not overlap then this PUTARG_STK will not mess up the arg. if ((overwrittenEnd <= argStart) || (overwrittenStart >= argEnd)) { continue; } // Codegen cannot handle a partially overlapping copy. For // example, if we have // bar(S16 stack, S32 stack2) // foo(S32 stack, S32 stack2) { bar(..., stack) } // then we may end up having to move 'stack' in foo 16 bytes // ahead. It is possible that this PUTARG_STK is the only use, // in which case we will need to introduce a temp, so look for // uses starting from it. Note that we assume that in-place // copies are OK. GenTree* lookForUsesFrom = put->gtNext; if (overwrittenStart != argStart) { lookForUsesFrom = insertionPoint; } RehomeArgForFastTailCall(callerArgLclNum, insertionPoint, lookForUsesFrom, call); // The above call can introduce temps and invalidate the pointer. callerArgDsc = comp->lvaGetDesc(callerArgLclNum); // For promoted locals we have more work to do as its fields could also have been invalidated. if (!callerArgDsc->lvPromoted) { continue; } unsigned int fieldsFirst = callerArgDsc->lvFieldLclStart; unsigned int fieldsEnd = fieldsFirst + callerArgDsc->lvFieldCnt; for (unsigned int j = fieldsFirst; j < fieldsEnd; j++) { RehomeArgForFastTailCall(j, insertionPoint, lookForUsesFrom, call); } } } } // Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be // inserted before the args are setup but after the side effects of args are // computed. That is, GT_PROF_HOOK node needs to be inserted before GT_START_NONGC // node if one exists. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, startNonGCNode); } #else // !FEATURE_FASTTAILCALL // Platform does not implement fast tail call mechanism. This cannot be // reached because we always choose to do a tailcall via helper on those // platforms (or no tailcall at all). unreached(); #endif } // //------------------------------------------------------------------------ // RehomeArgForFastTailCall: Introduce temps for args that may be overwritten // during fast tailcall sequence. 
// // Arguments: // lclNum - the lcl num of the arg that will be overwritten. // insertTempBefore - the node at which to copy the arg into a temp. // lookForUsesStart - the node where to start scanning and replacing uses of // the arg specified by lclNum. // callNode - the call node that is being dispatched as a fast tailcall. // // Assumptions: // all args must be non-null. // // Notes: // This function scans for uses of the arg specified by lclNum starting // from the lookForUsesStart node. If it finds any uses it introduces a temp // for this argument and updates uses to use this instead. In the situation // where it introduces a temp it can thus invalidate pointers to other // locals. // void Lowering::RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode) { unsigned int tmpLclNum = BAD_VAR_NUM; for (GenTree* treeNode = lookForUsesStart; treeNode != callNode; treeNode = treeNode->gtNext) { if (!treeNode->OperIsLocal() && !treeNode->OperIsLocalAddr()) { continue; } GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon(); if (lcl->GetLclNum() != lclNum) { continue; } // Create tmp and use it in place of callerArgDsc if (tmpLclNum == BAD_VAR_NUM) { tmpLclNum = comp->lvaGrabTemp(true DEBUGARG("Fast tail call lowering is creating a new local variable")); LclVarDsc* callerArgDsc = comp->lvaGetDesc(lclNum); var_types tmpTyp = genActualType(callerArgDsc->TypeGet()); comp->lvaTable[tmpLclNum].lvType = tmpTyp; // TODO-CQ: I don't see why we should copy doNotEnreg. comp->lvaTable[tmpLclNum].lvDoNotEnregister = callerArgDsc->lvDoNotEnregister; #ifdef DEBUG comp->lvaTable[tmpLclNum].SetDoNotEnregReason(callerArgDsc->GetDoNotEnregReason()); #endif // DEBUG GenTree* value = comp->gtNewLclvNode(lclNum, tmpTyp); if (tmpTyp == TYP_STRUCT) { comp->lvaSetStruct(tmpLclNum, comp->lvaGetStruct(lclNum), false); } GenTreeLclVar* storeLclVar = comp->gtNewStoreLclVar(tmpLclNum, value); BlockRange().InsertBefore(insertTempBefore, LIR::SeqTree(comp, storeLclVar)); ContainCheckRange(value, storeLclVar); LowerNode(storeLclVar); } lcl->SetLclNum(tmpLclNum); } } //------------------------------------------------------------------------ // LowerTailCallViaJitHelper: lower a call via the tailcall JIT helper. Morph // has already inserted tailcall helper special arguments. This function inserts // actual data for some placeholders. This function is only used on x86. // // Lower // tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg) // as // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // Note that the special arguments are on the stack, whereas the function arguments follow the normal convention. // // Also inserts PInvoke method epilog if required. // // Arguments: // call - The call node // callTarget - The real call target. This is used to replace the dummyArg during lowering. // // Return Value: // Returns control expression tree for making a call to helper Jit_TailCall. // GenTree* Lowering::LowerTailCallViaJitHelper(GenTreeCall* call, GenTree* callTarget) { // Tail call restrictions i.e. conditions under which tail prefix is ignored. // Most of these checks are already done by importer or fgMorphTailCall(). // This serves as a double sanity check. 
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods assert(!call->IsUnmanaged()); // tail calls to unmanaged methods assert(!comp->compLocallocUsed); // tail call from methods that also do localloc // We expect to see a call that meets the following conditions assert(call->IsTailCallViaJitHelper()); assert(callTarget != nullptr); // The TailCall helper call never returns to the caller and is not GC interruptible. // Therefore the block containing the tail call should be a GC safe point to avoid // GC starvation. It is legal for the block to be unmarked iff the entry block is a // GC safe point, as the entry block trivially dominates every reachable block. assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT)); // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that // a method returns. This handles the case where the caller method has both PInvokes and tail calls. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call)); } // Remove gtCallAddr from execution order if present. if (call->gtCallType == CT_INDIRECT) { assert(call->gtCallAddr != nullptr); bool isClosed; LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed); assert(isClosed); BlockRange().Remove(std::move(callAddrRange)); } // The callTarget tree needs to be sequenced. LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget); // Verify the special args are what we expect, and replace the dummy args with real values. // We need to figure out the size of the outgoing stack arguments, not including the special args. // The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes. // This number is exactly the next slot number in the call's argument info struct. unsigned nNewStkArgsBytes = call->fgArgInfo->GetNextSlotByteOffset(); const int wordSize = 4; unsigned nNewStkArgsWords = nNewStkArgsBytes / wordSize; DEBUG_ARG_SLOTS_ASSERT(call->fgArgInfo->GetNextSlotNum() == nNewStkArgsWords); assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args. nNewStkArgsWords -= 4; unsigned numArgs = call->fgArgInfo->ArgCount(); fgArgTabEntry* argEntry; // arg 0 == callTarget. argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1); assert(argEntry != nullptr); GenTree* arg0 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); ContainCheckRange(callTargetRange); BlockRange().InsertAfter(arg0, std::move(callTargetRange)); bool isClosed; LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed); assert(isClosed); BlockRange().Remove(std::move(secondArgRange)); argEntry->GetNode()->AsPutArgStk()->gtOp1 = callTarget; // arg 1 == flags argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2); assert(argEntry != nullptr); GenTree* arg1 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg1->gtOper == GT_CNS_INT); ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX (call->IsVirtualStub() ? 
0x2 : 0x0); // Stub dispatch flag arg1->AsIntCon()->gtIconVal = tailCallHelperFlags; // arg 2 == numberOfNewStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3); assert(argEntry != nullptr); GenTree* arg2 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg2->gtOper == GT_CNS_INT); arg2->AsIntCon()->gtIconVal = nNewStkArgsWords; #ifdef DEBUG // arg 3 == numberOfOldStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4); assert(argEntry != nullptr); GenTree* arg3 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg3->gtOper == GT_CNS_INT); #endif // DEBUG // Transform this call node into a call to Jit tail call helper. call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; // Lower this as if it were a pure helper call. call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER); GenTree* result = LowerDirectCall(call); // Now add back tail call flags for identifying this node as tail call dispatched via helper. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; #ifdef PROFILING_SUPPORTED // Insert profiler tail call hook if needed. // Since we don't know the insertion point, pass null for second param. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, nullptr); } #endif // PROFILING_SUPPORTED return result; } //------------------------------------------------------------------------ // LowerCFGCall: Potentially lower a call to use control-flow guard. This // expands indirect calls into either a validate+call sequence or to a dispatch // helper taking the original target in a special register. // // Arguments: // call - The call node // void Lowering::LowerCFGCall(GenTreeCall* call) { assert(!call->IsHelperCall(comp, CORINFO_HELP_DISPATCH_INDIRECT_CALL)); if (call->IsHelperCall(comp, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { return; } GenTree* callTarget = call->gtCallType == CT_INDIRECT ? call->gtCallAddr : call->gtControlExpr; if ((callTarget == nullptr) || callTarget->IsIntegralConst()) { // This is a direct call, no CFG check is necessary. return; } CFGCallKind cfgKind = call->GetCFGCallKind(); switch (cfgKind) { case CFGCallKind::ValidateAndCall: { // To safely apply CFG we need to generate a very specific pattern: // in particular, it is a safety issue to allow the JIT to reload // the call target from memory between calling // CORINFO_HELP_VALIDATE_INDIRECT_CALL and the target. This is // something that would easily occur in debug codegen if we // produced high-level IR. Instead we will use a GT_PHYSREG node // to get the target back from the register that contains the target. // // Additionally, the validator does not preserve all arg registers, // so we have to move all GT_PUTARG_REG nodes that would otherwise // be trashed ahead. The JIT also has an internal invariant that // once GT_PUTARG nodes start to appear in LIR, the call is coming // up. To avoid breaking this invariant we move _all_ GT_PUTARG // nodes (in particular, GC info reporting relies on this). // // To sum up, we end up transforming // // ta... = <early args> // tb... = <late args> // tc = callTarget // GT_CALL tc, ta..., tb... // // into // // ta... = <early args> (without GT_PUTARG_* nodes) // tb = callTarget // GT_CALL CORINFO_HELP_VALIDATE_INDIRECT_CALL, tb // tc = GT_PHYSREG REG_VALIDATE_INDIRECT_CALL_ADDR (preserved by helper) // td = <moved GT_PUTARG_* nodes> // GT_CALL tb, ta..., td.. 
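            // (Reading the target back via GT_PHYSREG below is what guarantees that the
            // exact pointer that was validated is the one that gets called, rather than a
            // potentially attacker-controlled reload of the target from memory.)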
// GenTree* regNode = PhysReg(REG_VALIDATE_INDIRECT_CALL_ADDR, TYP_I_IMPL); LIR::Use useOfTar; bool gotUse = BlockRange().TryGetUse(callTarget, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(regNode); GenTree* targetPlaceholder = comp->gtNewZeroConNode(callTarget->TypeGet()); // Add the call to the validator. Use a placeholder for the target while we // morph, sequence and lower, to avoid redoing that for the actual target. GenTreeCall::Use* args = comp->gtNewCallArgs(targetPlaceholder); GenTreeCall* validate = comp->gtNewHelperCallNode(CORINFO_HELP_VALIDATE_INDIRECT_CALL, TYP_VOID, args); comp->fgMorphTree(validate); LIR::Range validateRange = LIR::SeqTree(comp, validate); GenTree* validateFirst = validateRange.FirstNode(); GenTree* validateLast = validateRange.LastNode(); // Insert the validator with the call target before the late args. BlockRange().InsertBefore(call, std::move(validateRange)); // Swap out the target gotUse = BlockRange().TryGetUse(targetPlaceholder, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(callTarget); targetPlaceholder->SetUnusedValue(); LowerRange(validateFirst, validateLast); // Insert the PHYSREG node that we must load right after validation. BlockRange().InsertAfter(validate, regNode); LowerNode(regNode); // Finally move all GT_PUTARG_* nodes for (GenTreeCall::Use& use : call->Args()) { GenTree* node = use.GetNode(); if (!node->IsValue()) { // Non-value nodes in early args are setup nodes for late args. continue; } assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } for (GenTreeCall::Use& use : call->LateArgs()) { GenTree* node = use.GetNode(); assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } break; } case CFGCallKind::Dispatch: { #ifdef REG_DISPATCH_INDIRECT_CALL_ADDR // Now insert the call target as an extra argument. // // First append the early placeholder arg GenTreeCall::Use** earlySlot = &call->gtCallArgs; unsigned int index = call->gtCallThisArg != nullptr ? 
1 : 0; while (*earlySlot != nullptr) { earlySlot = &(*earlySlot)->NextRef(); index++; } assert(index == call->fgArgInfo->ArgCount()); GenTree* placeHolder = comp->gtNewArgPlaceHolderNode(callTarget->TypeGet(), NO_CLASS_HANDLE); placeHolder->gtFlags |= GTF_LATE_ARG; *earlySlot = comp->gtNewCallArgs(placeHolder); // Append the late actual arg GenTreeCall::Use** lateSlot = &call->gtCallLateArgs; unsigned int lateIndex = 0; while (*lateSlot != nullptr) { lateSlot = &(*lateSlot)->NextRef(); lateIndex++; } *lateSlot = comp->gtNewCallArgs(callTarget); // Add an entry into the arg info regNumber regNum = REG_DISPATCH_INDIRECT_CALL_ADDR; unsigned numRegs = 1; unsigned byteSize = TARGET_POINTER_SIZE; unsigned byteAlignment = TARGET_POINTER_SIZE; bool isStruct = false; bool isFloatHfa = false; bool isVararg = false; fgArgTabEntry* entry = call->fgArgInfo->AddRegArg(index, placeHolder, *earlySlot, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); entry->lateUse = *lateSlot; entry->SetLateArgInx(lateIndex); // Lower the newly added args now that call is updated LowerArg(call, &(*earlySlot)->NodeRef()); LowerArg(call, &(*lateSlot)->NodeRef()); // Finally update the call to be a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_DISPATCH_INDIRECT_CALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif // Now relower the call target call->gtControlExpr = LowerDirectCall(call); if (call->gtControlExpr != nullptr) { LIR::Range dispatchControlExprRange = LIR::SeqTree(comp, call->gtControlExpr); ContainCheckRange(dispatchControlExprRange); BlockRange().InsertBefore(call, std::move(dispatchControlExprRange)); } #else assert(!"Unexpected CFGCallKind::Dispatch for platform without dispatcher"); #endif break; } default: unreached(); } } //------------------------------------------------------------------------ // IsInvariantInRange: Check if a node is invariant in the specified range. In // other words, can 'node' be moved to right before 'endExclusive' without its // computation changing values? // // Arguments: // node - The node. // endExclusive - The exclusive end of the range to check invariance for. // bool Lowering::IsInvariantInRange(GenTree* node, GenTree* endExclusive) { assert(node->Precedes(endExclusive)); if (node->IsInvariant()) { return true; } if (!node->IsValue()) { return false; } if (node->OperIsLocal()) { GenTreeLclVarCommon* lcl = node->AsLclVarCommon(); LclVarDsc* desc = comp->lvaGetDesc(lcl); if (desc->IsAddressExposed()) { return false; } // Currently, non-address exposed locals have the property that their // use occurs at the user, so no further interference check is // necessary. return true; } return false; } //------------------------------------------------------------------------ // MoveCFGCallArg: Given a call that will be CFG transformed using the // validate+call scheme, and an argument GT_PUTARG_* or GT_FIELD_LIST node, // move that node right before the call. // // Arguments: // call - The call that is being CFG transformed // node - The argument node // // Remarks: // We can always move the GT_PUTARG_* node further ahead as the side-effects // of these nodes are handled by LSRA. 
However, the operands of these nodes // are not always safe to move further ahead; for invariant operands, we // move them ahead as well to shorten the lifetime of these values. // void Lowering::MoveCFGCallArg(GenTreeCall* call, GenTree* node) { assert(node->OperIsPutArg() || node->OperIsFieldList()); if (node->OperIsFieldList()) { JITDUMP("Node is a GT_FIELD_LIST; moving all operands\n"); for (GenTreeFieldList::Use& operand : node->AsFieldList()->Uses()) { assert(operand.GetNode()->OperIsPutArg()); MoveCFGCallArg(call, operand.GetNode()); } } else { GenTree* operand = node->AsOp()->gtGetOp1(); JITDUMP("Checking if we can move operand of GT_PUTARG_* node:\n"); DISPTREE(operand); if (((operand->gtFlags & GTF_ALL_EFFECT) == 0) && IsInvariantInRange(operand, call)) { JITDUMP("...yes, moving to after validator call\n"); BlockRange().Remove(operand); BlockRange().InsertBefore(call, operand); } else { JITDUMP("...no, operand has side effects or is not invariant\n"); } } JITDUMP("Moving\n"); DISPTREE(node); JITDUMP("\n"); BlockRange().Remove(node); BlockRange().InsertBefore(call, node); } #ifndef TARGET_64BIT //------------------------------------------------------------------------ // Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node. // // Arguments: // cmp - the compare node // // Return Value: // The next node to lower. // // Notes: // This is done during lowering because DecomposeLongs handles only nodes // that produce TYP_LONG values. Compare nodes may consume TYP_LONG values // but produce TYP_INT values. // GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) { assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG); GenTree* src1 = cmp->gtGetOp1(); GenTree* src2 = cmp->gtGetOp2(); assert(src1->OperIs(GT_LONG)); assert(src2->OperIs(GT_LONG)); GenTree* loSrc1 = src1->gtGetOp1(); GenTree* hiSrc1 = src1->gtGetOp2(); GenTree* loSrc2 = src2->gtGetOp1(); GenTree* hiSrc2 = src2->gtGetOp2(); BlockRange().Remove(src1); BlockRange().Remove(src2); genTreeOps condition = cmp->OperGet(); GenTree* loCmp; GenTree* hiCmp; if (cmp->OperIs(GT_EQ, GT_NE)) { // // Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can // be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we // don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction. // // XOR is used rather than SUB because it is commutative and thus allows swapping the operands when // the first happens to be a constant. Usually only the second compare operand is a constant but it's // still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast // then hiSrc1 would be 0. 
// if (loSrc1->OperIs(GT_CNS_INT)) { std::swap(loSrc1, loSrc2); } if (loSrc2->IsIntegralConst(0)) { BlockRange().Remove(loSrc2); loCmp = loSrc1; } else { loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2); BlockRange().InsertBefore(cmp, loCmp); ContainCheckBinary(loCmp->AsOp()); } if (hiSrc1->OperIs(GT_CNS_INT)) { std::swap(hiSrc1, hiSrc2); } if (hiSrc2->IsIntegralConst(0)) { BlockRange().Remove(hiSrc2); hiCmp = hiSrc1; } else { hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckBinary(hiCmp->AsOp()); } hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckBinary(hiCmp->AsOp()); } else { assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT)); // // If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0). // If the compare is unsigned we can still use SUB but we need to check the Carry flag, // not the actual result. In both cases we can simply check the appropriate condition flags // and ignore the actual result: // SUB_LO loSrc1, loSrc2 // SUB_HI hiSrc1, hiSrc2 // SETCC|JCC (signed|unsigned LT|GE) // If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can // be turned into a CMP because the first SUB would have set carry to 0. This effectively // transforms a long compare against 0 into an int compare of the high part against 0. // // (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value // is greater than 0 is not so easy. We need to turn this into a positive/negative check // like the one we get for LT|GE compares; this can be achieved by swapping the compare: // (x LE|GT y) becomes (y GE|LT x) // // Having to swap operands is problematic when the second operand is a constant. The constant // moves to the first operand where it cannot be contained and thus needs a register. This can // be avoided by changing the constant such that LE|GT becomes LT|GE: // (x LE|GT 41) becomes (x LT|GE 42) // if (cmp->OperIs(GT_LE, GT_GT)) { bool mustSwap = true; if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT)) { uint32_t loValue = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue()); uint32_t hiValue = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue()); uint64_t value = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32); uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX; if (value != maxValue) { value++; loValue = value & UINT32_MAX; hiValue = (value >> 32) & UINT32_MAX; loSrc2->AsIntCon()->SetIconValue(loValue); hiSrc2->AsIntCon()->SetIconValue(hiValue); condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE; mustSwap = false; } } if (mustSwap) { std::swap(loSrc1, loSrc2); std::swap(hiSrc1, hiSrc2); condition = GenTree::SwapRelop(condition); } } assert((condition == GT_LT) || (condition == GT_GE)); if (loSrc2->IsIntegralConst(0)) { BlockRange().Remove(loSrc2); // Very conservative dead code removal... but it helps. 
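            // For example (illustrative): a signed 'x >= 0' on a TYP_LONG x drops the
            // low-word compare entirely, and the GT_CMP on the high word created below
            // decides the result by itself.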
if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(loSrc1); } else { loSrc1->SetUnusedValue(); } hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckCompare(hiCmp->AsOp()); } else { loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2); hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, loCmp, hiCmp); ContainCheckCompare(loCmp->AsOp()); ContainCheckBinary(hiCmp->AsOp()); // // Try to move the first SUB_HI operands right in front of it, this allows using // a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do // this only for locals as they won't change condition flags. Note that we could // move constants (except 0 which generates XOR reg, reg) but it's extremely rare // to have a constant as the first operand. // if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(hiSrc1); BlockRange().InsertBefore(hiCmp, hiSrc1); } } } hiCmp->gtFlags |= GTF_SET_FLAGS; if (hiCmp->IsValue()) { hiCmp->SetUnusedValue(); } LIR::Use cmpUse; if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE)) { BlockRange().Remove(cmp); GenTree* jcc = cmpUse.User(); jcc->AsOp()->gtOp1 = nullptr; jcc->ChangeOper(GT_JCC); jcc->gtFlags |= GTF_USE_FLAGS; jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } else { cmp->AsOp()->gtOp1 = nullptr; cmp->AsOp()->gtOp2 = nullptr; cmp->ChangeOper(GT_SETCC); cmp->gtFlags |= GTF_USE_FLAGS; cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } return cmp->gtNext; } #endif // !TARGET_64BIT //------------------------------------------------------------------------ // Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations. // // Arguments: // cmp - the compare node // // Return Value: // The original compare node if lowering should proceed as usual or the next node // to lower if the compare node was changed in such a way that lowering is no // longer needed. // // Notes: // - Narrow operands to enable memory operand containment (XARCH specific). // - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could // be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added). // - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific) // - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the // condition flags appropriately (XARCH/ARM64 specific but could be extended // to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS). // GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) { assert(cmp->gtGetOp2()->IsIntegralConst()); #if defined(TARGET_XARCH) || defined(TARGET_ARM64) GenTree* op1 = cmp->gtGetOp1(); GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon(); ssize_t op2Value = op2->IconValue(); #ifdef TARGET_XARCH var_types op1Type = op1->TypeGet(); if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && FitsIn(op1Type, op2Value)) { // // If op1's type is small then try to narrow op2 so it has the same type as op1. // Small types are usually used by memory loads and if both compare operands have // the same type then the memory load can be contained. In certain situations // (e.g "cmp ubyte, 200") we also get a smaller instruction encoding. 
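        // (Illustrative: for a compare of a ubyte load against 200, narrowing the constant
        // to TYP_UBYTE here lets codegen contain the load and emit 'cmp byte ptr [mem], 200'
        // instead of a widening load followed by a 32-bit compare.)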
// op2->gtType = op1Type; } else #endif if (op1->OperIs(GT_CAST) && !op1->gtOverflow()) { GenTreeCast* cast = op1->AsCast(); var_types castToType = cast->CastToType(); GenTree* castOp = cast->gtGetOp1(); if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value)) { // // Since we're going to remove the cast we need to be able to narrow the cast operand // to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR). // Some opers just can't be narrowed (e.g DIV, MUL) while others could be narrowed but // doing so would produce incorrect results (e.g. RSZ, RSH). // // The below list of handled opers is conservative but enough to handle the most common // situations. In particular this includes CALL; sometimes the JIT unnecessarily widens // the result of bool-returning calls. // bool removeCast = #ifdef TARGET_ARM64 (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) && #endif (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIs(GT_OR, GT_XOR, GT_AND) #ifdef TARGET_XARCH || IsContainableMemoryOp(castOp) #endif ); if (removeCast) { assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation #ifdef TARGET_ARM64 bool cmpEq = cmp->OperIs(GT_EQ); cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE); op2->SetIconValue(0xff); op2->gtType = castOp->gtType; #else castOp->gtType = castToType; op2->gtType = castToType; #endif // If we have any contained memory ops on castOp, they must now not be contained. if (castOp->OperIs(GT_OR, GT_XOR, GT_AND)) { GenTree* op1 = castOp->gtGetOp1(); if ((op1 != nullptr) && !op1->IsCnsIntOrI()) { op1->ClearContained(); } GenTree* op2 = castOp->gtGetOp2(); if ((op2 != nullptr) && !op2->IsCnsIntOrI()) { op2->ClearContained(); } } cmp->AsOp()->gtOp1 = castOp; BlockRange().Remove(cast); } } } else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE)) { // // Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible. // GenTree* andOp1 = op1->gtGetOp1(); GenTree* andOp2 = op1->gtGetOp2(); if (op2Value != 0) { // // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask) // into ((x AND mask) NE|EQ 0) when mask is a single bit. // if (isPow2<target_size_t>(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value)) { op2Value = 0; op2->SetIconValue(0); cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet())); } } if (op2Value == 0) { BlockRange().Remove(op1); BlockRange().Remove(op2); cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE); cmp->AsOp()->gtOp1 = andOp1; cmp->AsOp()->gtOp2 = andOp2; // We will re-evaluate containment below andOp1->ClearContained(); andOp2->ClearContained(); #ifdef TARGET_XARCH if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst()) { // // For "test" we only care about the bits that are set in the second operand (mask). // If the mask fits in a small type then we can narrow both operands to generate a "test" // instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid // a widening load in some cases. // // For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches // the behavior of a previous implementation and avoids adding more cases where we generate // 16 bit instructions that require a length changing prefix (0x66). These suffer from // significant decoder stalls on Intel CPUs. // // We could also do this for 64 bit masks that fit into 32 bit but it doesn't help. 
// In such cases morph narrows down the existing GT_AND by inserting a cast between it and // the memory operand so we'd need to add more code to recognize and eliminate that cast. // size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue()); if (FitsIn<UINT8>(mask)) { andOp1->gtType = TYP_UBYTE; andOp2->gtType = TYP_UBYTE; } else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2) { andOp1->gtType = TYP_USHORT; andOp2->gtType = TYP_USHORT; } } #endif } } if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE)) { #ifdef TARGET_XARCH // // Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT // results in smaller and faster code. It also doesn't have special register // requirements, unlike LSH that requires the shift count to be in ECX. // Note that BT has the same behavior as LSH when the bit index exceeds the // operand bit size - it uses (bit_index MOD bit_size). // GenTree* lsh = cmp->gtGetOp2(); LIR::Use cmpUse; if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) && BlockRange().TryGetUse(cmp, &cmpUse)) { GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC; cmp->SetOper(GT_BT); cmp->gtType = TYP_VOID; cmp->gtFlags |= GTF_SET_FLAGS; cmp->AsOp()->gtOp2 = lsh->gtGetOp2(); cmp->gtGetOp2()->ClearContained(); BlockRange().Remove(lsh->gtGetOp1()); BlockRange().Remove(lsh); GenTreeCC* cc; if (cmpUse.User()->OperIs(GT_JTRUE)) { cmpUse.User()->ChangeOper(GT_JCC); cc = cmpUse.User()->AsCC(); cc->gtCondition = condition; } else { cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT); BlockRange().InsertAfter(cmp, cc); cmpUse.ReplaceWith(cc); } cc->gtFlags |= GTF_USE_FLAGS; return cmp->gtNext; } #endif // TARGET_XARCH } else if (cmp->OperIs(GT_EQ, GT_NE)) { GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // TODO-CQ: right now the below peep is inexpensive and gets the benefit in most // cases because in the majority of cases op1, op2 and cmp would be in that order in // execution. In general we should be able to check that all the nodes that come // after op1 do not modify the flags so that it is safe to avoid generating a // test instruction. if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) && #ifdef TARGET_XARCH (op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG) #ifdef FEATURE_HW_INTRINSICS || (op1->OperIs(GT_HWINTRINSIC) && emitter::DoesWriteZeroFlag(HWIntrinsicInfo::lookupIns(op1->AsHWIntrinsic()))) #endif // FEATURE_HW_INTRINSICS ) #else // TARGET_ARM64 op1->OperIs(GT_AND, GT_ADD, GT_SUB) #endif ) { op1->gtFlags |= GTF_SET_FLAGS; op1->SetUnusedValue(); BlockRange().Remove(op2); GenTree* next = cmp->gtNext; GenTree* cc; genTreeOps ccOp; LIR::Use cmpUse; // Fast check for the common case - relop used by a JTRUE that immediately follows it. if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp)) { cc = next; ccOp = GT_JCC; next = nullptr; BlockRange().Remove(cmp); } else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE)) { cc = cmpUse.User(); ccOp = GT_JCC; next = nullptr; BlockRange().Remove(cmp); } else // The relop is not used by a JTRUE or it is not used at all. { // Transform the relop node into a SETCC. If it's not used we could remove // it completely but that means doing more work to handle a rare case. 
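                // e.g. (illustrative) 'flag = (a + b) != 0': the ADD is kept, marked
                // GTF_SET_FLAGS with its value unused, and the boolean is materialized
                // by the SETCC(NE) created here instead of by a separate compare.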
cc = cmp; ccOp = GT_SETCC; } GenCondition condition = GenCondition::FromIntegralRelop(cmp); cc->ChangeOper(ccOp); cc->AsCC()->gtCondition = condition; cc->gtFlags |= GTF_USE_FLAGS; return next; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) return cmp; } //------------------------------------------------------------------------ // Lowering::LowerCompare: Lowers a compare node. // // Arguments: // cmp - the compare node // // Return Value: // The next node to lower. // GenTree* Lowering::LowerCompare(GenTree* cmp) { #ifndef TARGET_64BIT if (cmp->gtGetOp1()->TypeGet() == TYP_LONG) { return DecomposeLongCompare(cmp); } #endif if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts()) { GenTree* next = OptimizeConstCompare(cmp); // If OptimizeConstCompare return the compare node as "next" then we need to continue lowering. if (next != cmp) { return next; } } #ifdef TARGET_XARCH if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet()) { if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet())) { // // If both operands have the same type then codegen will use the common operand type to // determine the instruction type. For small types this would result in performing a // signed comparison of two small unsigned values without zero extending them to TYP_INT // which is incorrect. Note that making the comparison unsigned doesn't imply that codegen // has to generate a small comparison, it can still correctly generate a TYP_INT comparison. // cmp->gtFlags |= GTF_UNSIGNED; } } #endif // TARGET_XARCH ContainCheckCompare(cmp->AsOp()); return cmp->gtNext; } //------------------------------------------------------------------------ // Lowering::LowerJTrue: Lowers a JTRUE node. // // Arguments: // jtrue - the JTRUE node // // Return Value: // The next node to lower (usually nullptr). // // Notes: // On ARM64 this may remove the JTRUE node and transform its associated // relop into a JCMP node. // GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue) { #ifdef TARGET_ARM64 GenTree* relop = jtrue->gtGetOp1(); GenTree* relopOp2 = relop->AsOp()->gtGetOp2(); if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI()) { bool useJCMP = false; GenTreeFlags flags = GTF_EMPTY; if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0)) { // Codegen will use cbz or cbnz in codegen which do not affect the flag register flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY; useJCMP = true; } else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue())) { // Codegen will use tbz or tbnz in codegen which do not affect the flag register flags = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : GTF_EMPTY); useJCMP = true; } if (useJCMP) { relop->SetOper(GT_JCMP); relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ); relop->gtFlags |= flags; relop->gtType = TYP_VOID; relopOp2->SetContained(); BlockRange().Remove(jtrue); assert(relop->gtNext == nullptr); return nullptr; } } #endif // TARGET_ARM64 ContainCheckJTrue(jtrue); assert(jtrue->gtNext == nullptr); return nullptr; } //---------------------------------------------------------------------------------------------- // LowerNodeCC: Lowers a node that produces a boolean value by setting the condition flags. // // Arguments: // node - The node to lower // condition - The condition code of the generated SETCC/JCC node // // Return Value: // A SETCC/JCC node or nullptr if `node` is not used. 
//
// Notes:
//     This simply replaces `node`'s use with an appropriate SETCC/JCC node,
//     `node` is not actually changed, except by having its GTF_SET_FLAGS set.
//     It's the caller's responsibility to change `node` such that it only
//     sets the condition flags, without producing a boolean value.
//
GenTreeCC* Lowering::LowerNodeCC(GenTree* node, GenCondition condition)
{
    // Skip over a chain of EQ/NE(x, 0) relops. This may be present either
    // because `node` is not a relop and so it cannot be used directly by a
    // JTRUE, or because the frontend failed to remove a EQ/NE(x, 0) that's
    // used as logical negation.
    //
    // Usually there's only one such relop but there's little difference
    // between removing one or all so we may as well remove them all.
    //
    // We can't allow any other nodes between `node` and its user because we
    // have no way of knowing if those nodes change flags or not. So we're looking
    // to skip over a sequence of appropriately connected zero and EQ/NE nodes.

    // The x in EQ/NE(x, 0)
    GenTree* relop = node;
    // The first node of the relop sequence
    GenTree* first = node->gtNext;
    // The node following the relop sequence
    GenTree* next = first;

    while ((next != nullptr) && next->IsIntegralConst(0) && (next->gtNext != nullptr) &&
           next->gtNext->OperIs(GT_EQ, GT_NE) && (next->gtNext->AsOp()->gtGetOp1() == relop) &&
           (next->gtNext->AsOp()->gtGetOp2() == next))
    {
        relop = next->gtNext;
        next  = relop->gtNext;

        if (relop->OperIs(GT_EQ))
        {
            condition = GenCondition::Reverse(condition);
        }
    }

    GenTreeCC* cc = nullptr;

    // Next may be null if `node` is not used. In that case we don't need to generate a SETCC node.
    if (next != nullptr)
    {
        if (next->OperIs(GT_JTRUE))
        {
            // If the instruction immediately following 'relop', i.e. 'next' is a conditional branch,
            // it should always have 'relop' as its 'op1'. If it doesn't, then we have improperly
            // constructed IL (the setting of a condition code should always immediately precede its
            // use, since the JIT doesn't track dataflow for condition codes). Still, if it happens
            // it's not our problem, it simply means that `node` is not used and can be removed.
            if (next->AsUnOp()->gtGetOp1() == relop)
            {
                assert(relop->OperIsCompare());

                next->ChangeOper(GT_JCC);
                cc              = next->AsCC();
                cc->gtCondition = condition;
            }
        }
        else
        {
            // If the node is used by something other than a JTRUE then we need to insert a
            // SETCC node to materialize the boolean value.
            LIR::Use use;

            if (BlockRange().TryGetUse(relop, &use))
            {
                cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
                BlockRange().InsertAfter(node, cc);
                use.ReplaceWith(cc);
            }
        }
    }

    if (cc != nullptr)
    {
        node->gtFlags |= GTF_SET_FLAGS;
        cc->gtFlags |= GTF_USE_FLAGS;
    }

    // Remove the chain of EQ/NE(x, 0) relop nodes, if any. Note that if a SETCC was
    // inserted after `node`, `first` still points to the node that was initially
    // after `node`.
    if (relop != node)
    {
        BlockRange().Remove(first, relop);
    }

    return cc;
}

// Lower "jmp <method>" tail call to insert PInvoke method epilog if required.
void Lowering::LowerJmpMethod(GenTree* jmp)
{
    assert(jmp->OperGet() == GT_JMP);

    JITDUMP("lowering GT_JMP\n");
    DISPNODE(jmp);
    JITDUMP("============");

    // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
    // a method returns.
    if (comp->compMethodRequiresPInvokeFrame())
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp));
    }
}

// Lower GT_RETURN node to insert PInvoke method epilog if required.
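// For example (a sketch): a method declared to return float whose GT_RETURN
// source is an int-typed node gets a GT_BITCAST inserted below, so codegen
// moves the bits between register files instead of converting the value.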
void Lowering::LowerRet(GenTreeUnOp* ret)
{
    assert(ret->OperGet() == GT_RETURN);

    JITDUMP("lowering GT_RETURN\n");
    DISPNODE(ret);
    JITDUMP("============");

    GenTree* retVal = ret->gtGetOp1();
    // There are two kinds of retyping:
    // - A simple bitcast can be inserted when:
    //   - We're returning a floating type as an integral type or vice-versa, or
    // - If we're returning a struct as a primitive type, we change the type of
    //   'retval' in 'LowerRetStructLclVar()'
    bool needBitcast =
        (ret->TypeGet() != TYP_VOID) && (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1()));
    bool doPrimitiveBitcast = false;
    if (needBitcast)
    {
        doPrimitiveBitcast = (!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
    }

    if (doPrimitiveBitcast)
    {
        // Add a simple bitcast when both types are not structs.
        // If one type is a struct it will be handled below.
#if defined(DEBUG)
        assert(!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
#endif

        GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
        ret->gtOp1       = bitcast;
        BlockRange().InsertBefore(ret, bitcast);
        ContainCheckBitCast(bitcast);
    }
    else if (ret->TypeGet() != TYP_VOID)
    {
#if FEATURE_MULTIREG_RET
        if (retVal->OperIs(GT_LCL_VAR) && varTypeIsStruct(retVal))
        {
            ReturnTypeDesc retTypeDesc;
            LclVarDsc*     varDsc = nullptr;
            varDsc                = comp->lvaGetDesc(retVal->AsLclVar());
            retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd(), comp->info.compCallConv);
            if (retTypeDesc.GetReturnRegCount() > 1)
            {
                CheckMultiRegLclVar(retVal->AsLclVar(), &retTypeDesc);
            }
        }
#endif // FEATURE_MULTIREG_RET

#ifdef DEBUG
        if (varTypeIsStruct(ret->TypeGet()) != varTypeIsStruct(retVal->TypeGet()))
        {
            if (varTypeIsStruct(ret->TypeGet()))
            {
                assert(comp->info.compRetNativeType != TYP_STRUCT);

                var_types retActualType    = genActualType(comp->info.compRetNativeType);
                var_types retValActualType = genActualType(retVal->TypeGet());

                bool constStructInit                  = retVal->IsConstInitVal();
                bool implicitCastFromSameOrBiggerSize = (genTypeSize(retActualType) <= genTypeSize(retValActualType));

                // This could happen if we have retyped op1 as a primitive type during struct promotion,
                // check `retypedFieldsMap` for details.
                bool actualTypesMatch = (retActualType == retValActualType);

                assert(actualTypesMatch || constStructInit || implicitCastFromSameOrBiggerSize);
            }
        }
#endif // DEBUG

        if (varTypeIsStruct(ret))
        {
            LowerRetStruct(ret);
        }
        else if (!ret->TypeIs(TYP_VOID) && varTypeIsStruct(retVal))
        {
            // Return struct as a primitive using Unsafe cast.
            assert(retVal->OperIs(GT_LCL_VAR));
            LowerRetSingleRegStructLclVar(ret);
        }
    }

    // Method doing PInvokes has exactly one return block unless it has tail calls.
    if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB))
    {
        InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
    }
    ContainCheckRet(ret);
}

//----------------------------------------------------------------------------------------------
// LowerStoreLocCommon: platform independent part of local var or field store lowering.
//
// Arguments:
//     lclStore - The store lcl node to lower.
// void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore) { assert(lclStore->OperIs(GT_STORE_LCL_FLD, GT_STORE_LCL_VAR)); JITDUMP("lowering store lcl var/field (before):\n"); DISPTREERANGE(BlockRange(), lclStore); JITDUMP("\n"); GenTree* src = lclStore->gtGetOp1(); LclVarDsc* varDsc = comp->lvaGetDesc(lclStore); const bool srcIsMultiReg = src->IsMultiRegNode(); const bool dstIsMultiReg = lclStore->IsMultiRegLclVar(); if (!dstIsMultiReg && varTypeIsStruct(varDsc)) { // TODO-Cleanup: we want to check `varDsc->lvRegStruct` as the last condition instead of `!varDsc->lvPromoted`, // but we do not set it for `CSE` vars so it is currently failing. assert(varDsc->CanBeReplacedWithItsField(comp) || varDsc->lvDoNotEnregister || !varDsc->lvPromoted); if (varDsc->CanBeReplacedWithItsField(comp)) { assert(varDsc->lvFieldCnt == 1); unsigned fldNum = varDsc->lvFieldLclStart; LclVarDsc* fldDsc = comp->lvaGetDesc(fldNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field V%02u for the store " "from a call [%06u]\n", lclStore->GetLclNum(), fldNum, comp->dspTreeID(lclStore)); lclStore->SetLclNum(fldNum); lclStore->ChangeType(fldDsc->TypeGet()); varDsc = fldDsc; } } if (srcIsMultiReg || dstIsMultiReg) { const ReturnTypeDesc* retTypeDesc = nullptr; if (src->OperIs(GT_CALL)) { retTypeDesc = src->AsCall()->GetReturnTypeDesc(); } CheckMultiRegLclVar(lclStore->AsLclVar(), retTypeDesc); } const var_types lclRegType = varDsc->GetRegisterType(lclStore); if ((lclStore->TypeGet() == TYP_STRUCT) && !srcIsMultiReg) { bool convertToStoreObj; if (src->OperGet() == GT_CALL) { GenTreeCall* call = src->AsCall(); const ClassLayout* layout = varDsc->GetLayout(); #ifdef DEBUG const unsigned slotCount = layout->GetSlotCount(); #if defined(TARGET_XARCH) && !defined(UNIX_AMD64_ABI) // Windows x64 doesn't have multireg returns, // x86 uses it only for long return type, not for structs. assert(slotCount == 1); assert(lclRegType != TYP_UNDEF); #else // !TARGET_XARCH || UNIX_AMD64_ABI if (!varDsc->lvIsHfa()) { if (slotCount > 1) { assert(call->HasMultiRegRetVal()); } else { unsigned size = layout->GetSize(); assert((size <= 8) || (size == 16)); bool isPowerOf2 = (((size - 1) & size) == 0); bool isTypeDefined = (lclRegType != TYP_UNDEF); assert(isPowerOf2 == isTypeDefined); } } #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // DEBUG #if !defined(WINDOWS_AMD64_ABI) if (!call->HasMultiRegRetVal() && (lclRegType == TYP_UNDEF)) { // If we have a single return register, // but we can't retype it as a primitive type, we must spill it. GenTreeLclVar* spilledCall = SpillStructCallResult(call); lclStore->gtOp1 = spilledCall; src = lclStore->gtOp1; JITDUMP("lowering store lcl var/field has to spill call src.\n"); LowerStoreLocCommon(lclStore); return; } #endif // !WINDOWS_AMD64_ABI convertToStoreObj = false; } else if (!varDsc->IsEnregisterableType()) { convertToStoreObj = true; } else if (src->OperIs(GT_CNS_INT)) { assert(src->IsIntegralConst(0) && "expected an INIT_VAL for non-zero init."); #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclRegType)) { CorInfoType simdBaseJitType = comp->getBaseJitTypeOfSIMDLocal(lclStore); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Lie about the type if we don't know/have it. 
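                        // (For a zero init any SIMD base type produces the same bits;
                        // FLOAT is just an arbitrary, always-valid choice.)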
                        simdBaseJitType = CORINFO_TYPE_FLOAT;
                    }
                    GenTreeSIMD* simdTree = comp->gtNewSIMDNode(lclRegType, src, SIMDIntrinsicInit, simdBaseJitType,
                                                                varDsc->lvExactSize);
                    BlockRange().InsertAfter(src, simdTree);
                    LowerSIMD(simdTree);
                    src               = simdTree;
                    lclStore->gtOp1   = src;
                    convertToStoreObj = false;
                }
                else
#endif // FEATURE_SIMD
                {
                    convertToStoreObj = false;
                }
            }
            else if (!src->OperIs(GT_LCL_VAR))
            {
                convertToStoreObj = true;
            }
            else
            {
                assert(src->OperIs(GT_LCL_VAR));
                convertToStoreObj = false;
            }

            if (convertToStoreObj)
            {
                const unsigned lclNum = lclStore->GetLclNum();
                GenTreeLclVar* addr   = comp->gtNewLclVarAddrNode(lclNum, TYP_BYREF);
                comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOp));

                addr->gtFlags |= GTF_VAR_DEF;
                assert(!addr->IsPartialLclFld(comp));
                addr->gtFlags |= GTF_DONT_CSE;

                // Create the assignment node.
                lclStore->ChangeOper(GT_STORE_OBJ);
                GenTreeBlk* objStore = lclStore->AsObj();
                // Only the GTF_LATE_ARG flag (if present) is preserved.
                objStore->gtFlags &= GTF_LATE_ARG;
                objStore->gtFlags |= GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP;
#ifndef JIT32_GCENCODER
                objStore->gtBlkOpGcUnsafe = false;
#endif
                objStore->gtBlkOpKind = GenTreeObj::BlkOpKindInvalid;
                objStore->SetLayout(varDsc->GetLayout());
                objStore->SetAddr(addr);
                objStore->SetData(src);
                BlockRange().InsertBefore(objStore, addr);
                LowerBlockStoreCommon(objStore);
                return;
            }
        }
    }

    // src and dst can be in registers, check if we need a bitcast.
    if (!src->TypeIs(TYP_STRUCT) && (varTypeUsesFloatReg(lclRegType) != varTypeUsesFloatReg(src)))
    {
        assert(!srcIsMultiReg && !dstIsMultiReg);
        assert(lclStore->OperIsLocalStore());
        assert(lclRegType != TYP_UNDEF);

        GenTree* bitcast = comp->gtNewBitCastNode(lclRegType, src);
        lclStore->gtOp1  = bitcast;
        src              = lclStore->gtGetOp1();
        BlockRange().InsertBefore(lclStore, bitcast);
        ContainCheckBitCast(bitcast);
    }

    LowerStoreLoc(lclStore);
    JITDUMP("lowering store lcl var/field (after):\n");
    DISPTREERANGE(BlockRange(), lclStore);
    JITDUMP("\n");
}

//----------------------------------------------------------------------------------------------
// LowerRetStruct: Lowers a struct return node.
//
// Arguments:
//     node - The return node to lower.
//
void Lowering::LowerRetStruct(GenTreeUnOp* ret)
{
#ifdef TARGET_ARM64
    if (GlobalJitOptions::compFeatureHfa)
    {
        if (varTypeIsSIMD(ret))
        {
            if (comp->info.compRetNativeType == TYP_STRUCT)
            {
                assert(varTypeIsSIMD(ret->gtGetOp1()));
                assert(comp->compMethodReturnsMultiRegRegTypeAlternate());
                ret->ChangeType(comp->info.compRetNativeType);
            }
            else
            {
                assert(comp->info.compRetNativeType == ret->TypeGet());
                GenTree* retVal = ret->gtGetOp1();
                if (retVal->TypeGet() != ret->TypeGet())
                {
                    assert(retVal->OperIs(GT_LCL_VAR));
                    LowerRetSingleRegStructLclVar(ret);
                }
                return;
            }
        }
    }
#endif // TARGET_ARM64

    if (comp->compMethodReturnsMultiRegRegTypeAlternate())
    {
        return;
    }

    assert(ret->OperIs(GT_RETURN));
    assert(varTypeIsStruct(ret));

    GenTree* retVal = ret->gtGetOp1();
    // Note: small types are returned as INT.
    var_types nativeReturnType = genActualType(comp->info.compRetNativeType);
    ret->ChangeType(nativeReturnType);

    switch (retVal->OperGet())
    {
        case GT_CALL:
            assert(retVal->TypeIs(nativeReturnType)); // Type should be changed during call processing.
            break;

        case GT_CNS_INT:
            // When we promote LCL_VAR single fields into return
            // we could have all types of constants here.
            if (varTypeUsesFloatReg(nativeReturnType))
            {
                // Do not expect `initblock` for SIMD* types,
                // only 'initobj'.
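                // That means the only integral constant we can see here is zero,
                // which is re-materialized below as a floating-point zero.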
assert(retVal->AsIntCon()->IconValue() == 0); retVal->BashToConst(0.0, TYP_FLOAT); } break; case GT_OBJ: retVal->ChangeOper(GT_IND); FALLTHROUGH; case GT_IND: retVal->ChangeType(nativeReturnType); LowerIndir(retVal->AsIndir()); break; case GT_LCL_VAR: LowerRetSingleRegStructLclVar(ret); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #ifdef FEATURE_SIMD case GT_SIMD: #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: #endif // FEATURE_HW_INTRINSICS { assert(!retVal->TypeIs(TYP_STRUCT)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } } break; #endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS case GT_LCL_FLD: { #ifdef DEBUG LclVarDsc* varDsc = comp->lvaGetDesc(retVal->AsLclFld()); assert(varDsc->lvDoNotEnregister); #endif retVal->ChangeType(nativeReturnType); } break; default: assert(varTypeIsEnregisterable(retVal)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } break; } } //---------------------------------------------------------------------------------------------- // LowerRetSingleRegStructLclVar: Lowers a return node with a struct lclVar as a source. // // Arguments: // node - The return node to lower. // // Notes: // - the function is only for LclVars that are returned in one register; // - if LclVar is allocated in memory then read it as return type; // - if LclVar can be enregistered read it as register type and add a bitcast if necessary; // void Lowering::LowerRetSingleRegStructLclVar(GenTreeUnOp* ret) { assert(!comp->compMethodReturnsMultiRegRegTypeAlternate()); assert(ret->OperIs(GT_RETURN)); GenTreeLclVarCommon* lclVar = ret->gtGetOp1()->AsLclVar(); assert(lclVar->OperIs(GT_LCL_VAR)); unsigned lclNum = lclVar->GetLclNum(); LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); if (varDsc->lvPromoted) { // TODO-1stClassStructs: We can no longer independently promote // or enregister this struct, since it is referenced as a whole. comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } if (varDsc->lvDoNotEnregister) { lclVar->ChangeOper(GT_LCL_FLD); lclVar->AsLclFld()->SetLclOffs(0); // We are returning as a primitive type and the lcl is of struct type. assert(comp->info.compRetNativeType != TYP_STRUCT); assert((genTypeSize(comp->info.compRetNativeType) == genTypeSize(ret)) || (varTypeIsIntegral(ret) && varTypeIsIntegral(comp->info.compRetNativeType) && (genTypeSize(comp->info.compRetNativeType) <= genTypeSize(ret)))); // If the actual return type requires normalization, then make sure we // do so by using the correct small type for the GT_LCL_FLD. It would // be conservative to check just compRetNativeType for this since small // structs are normalized to primitive types when they are returned in // registers, so we would normalize for them as well. if (varTypeIsSmall(comp->info.compRetType)) { assert(genTypeSize(comp->info.compRetNativeType) == genTypeSize(comp->info.compRetType)); lclVar->ChangeType(comp->info.compRetType); } else { // Otherwise we don't mind that we leave the upper bits undefined. 
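            // (For example, a struct wrapping a single ushort read back as TYP_INT:
            // the upper bits come from whatever follows the field in the local's
            // stack slot, and the ABI does not require them to be defined.)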
            lclVar->ChangeType(ret->TypeGet());
        }
    }
    else
    {
        const var_types lclVarType = varDsc->GetRegisterType(lclVar);
        assert(lclVarType != TYP_UNDEF);

        const var_types actualType = genActualType(lclVarType);
        lclVar->ChangeType(actualType);

        if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(lclVarType))
        {
            GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), ret->gtOp1);
            ret->gtOp1       = bitcast;
            BlockRange().InsertBefore(ret, bitcast);
            ContainCheckBitCast(bitcast);
        }
    }
}

//----------------------------------------------------------------------------------------------
// LowerCallStruct: Lowers a call node that returns a struct.
//
// Arguments:
//     call - The call node to lower.
//
// Notes:
//    - this handles only single-register returns;
//    - it transforms the call's user for `GT_STOREIND`.
//
void Lowering::LowerCallStruct(GenTreeCall* call)
{
    assert(varTypeIsStruct(call));
    if (call->HasMultiRegRetVal())
    {
        return;
    }

    if (GlobalJitOptions::compFeatureHfa)
    {
        if (comp->IsHfa(call))
        {
#if defined(TARGET_ARM64)
            assert(comp->GetHfaCount(call) == 1);
#elif defined(TARGET_ARM)
            // ARM returns double in 2 float registers, but
            // `call->HasMultiRegRetVal()` counts double registers.
            assert(comp->GetHfaCount(call) <= 2);
#else  // !TARGET_ARM64 && !TARGET_ARM
            NYI("Unknown architecture");
#endif // !TARGET_ARM64 && !TARGET_ARM
            var_types hfaType = comp->GetHfaType(call);
            if (call->TypeIs(hfaType))
            {
                return;
            }
        }
    }

    CORINFO_CLASS_HANDLE        retClsHnd = call->gtRetClsHnd;
    Compiler::structPassingKind howToReturnStruct;
    var_types                   returnType =
        comp->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
    assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN);
    var_types origType = call->TypeGet();
    call->gtType       = genActualType(returnType);

    LIR::Use callUse;
    if (BlockRange().TryGetUse(call, &callUse))
    {
        GenTree* user = callUse.User();
        switch (user->OperGet())
        {
            case GT_RETURN:
            case GT_STORE_LCL_VAR:
            case GT_STORE_BLK:
            case GT_STORE_OBJ:
                // Leave as is, the user will handle it.
                assert(user->TypeIs(origType) || varTypeIsSIMD(user->TypeGet()));
                break;

#ifdef FEATURE_SIMD
            case GT_STORE_LCL_FLD:
                // If the call type was ever updated (in importer) to TYP_SIMD*, it should match the user type.
                // If not, the user type should match the struct's returnType.
                assert((varTypeIsSIMD(user) && user->TypeIs(origType)) || (returnType == user->TypeGet()));
                break;
#endif // FEATURE_SIMD

            case GT_STOREIND:
#ifdef FEATURE_SIMD
                if (varTypeIsSIMD(user))
                {
                    user->ChangeType(returnType);
                    break;
                }
#endif // FEATURE_SIMD
                // importer has a separate mechanism to retype calls to helpers,
                // keep it for now.
                assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_CORERT_ABI)));
                assert(call->IsHelperCall());
                assert(returnType == user->TypeGet());
                break;

            default:
                unreached();
        }
    }
}

//----------------------------------------------------------------------------------------------
// LowerStoreSingleRegCallStruct: Lowers a store block where the source is a struct typed call.
//
// Arguments:
//     store - The store node to lower.
//
// Notes:
//    - the function is only for calls that return one register;
//    - it spills the call's result if it cannot be retyped as a primitive type;
//
void Lowering::LowerStoreSingleRegCallStruct(GenTreeBlk* store)
{
    assert(store->Data()->IsCall());
    GenTreeCall* call = store->Data()->AsCall();
    assert(!call->HasMultiRegRetVal());

    const ClassLayout* layout  = store->GetLayout();
    const var_types    regType = layout->GetRegisterType();

    if (regType != TYP_UNDEF)
    {
        store->ChangeType(regType);
        store->SetOper(GT_STOREIND);
        LowerStoreIndirCommon(store->AsStoreInd());
        return;
    }
    else
    {
#if defined(WINDOWS_AMD64_ABI)
        // All ABIs except Windows x64 support passing 3 byte structs in registers,
        // and the other 64-bit ABIs also support passing 5, 6 and 7 byte structs.
        unreached();
#else // !WINDOWS_AMD64_ABI
        if (store->OperIs(GT_STORE_OBJ))
        {
            store->SetOper(GT_STORE_BLK);
        }
        store->gtBlkOpKind = GenTreeObj::BlkOpKindUnroll;

        GenTreeLclVar* spilledCall = SpillStructCallResult(call);
        store->SetData(spilledCall);
        LowerBlockStoreCommon(store);
#endif // WINDOWS_AMD64_ABI
    }
}

#if !defined(WINDOWS_AMD64_ABI)
//----------------------------------------------------------------------------------------------
// SpillStructCallResult: Spill call result to memory.
//
// Arguments:
//     call - call with a return size of 3, 5, 6 or 7 bytes that has to be spilled to memory.
//
// Return Value:
//    load of the spilled variable.
//
GenTreeLclVar* Lowering::SpillStructCallResult(GenTreeCall* call) const
{
    // TODO-1stClassStructs: we can support this in codegen for `GT_STORE_BLK` without new temps.
    const unsigned spillNum = comp->lvaGrabTemp(true DEBUGARG("Return value temp for an odd struct return size"));
    comp->lvaSetVarDoNotEnregister(spillNum DEBUGARG(DoNotEnregisterReason::LocalField));
    CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
    comp->lvaSetStruct(spillNum, retClsHnd, false);
    GenTreeLclFld* spill = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, call->gtType, spillNum, 0);
    spill->gtOp1         = call;
    spill->gtFlags |= GTF_VAR_DEF;

    BlockRange().InsertAfter(call, spill);
    ContainCheckStoreLoc(spill);
    GenTreeLclVar* loadCallResult = comp->gtNewLclvNode(spillNum, TYP_STRUCT)->AsLclVar();
    BlockRange().InsertAfter(spill, loadCallResult);
    return loadCallResult;
}
#endif // !WINDOWS_AMD64_ABI

GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
{
    noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);

    // Non-virtual direct/indirect calls: Work out if the address of the
    // call is known at JIT time. If not it is either an indirect call
    // or the address must be accessed via a single/double indirection.
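    // The access type tells us how many loads stand between the address we get
    // back and the final call target: IAT_VALUE is the target itself, IAT_PVALUE
    // needs one indirection, IAT_PPVALUE needs two, and IAT_RELPVALUE stores an
    // offset that must be added back to the cell's own address (see below).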
    void*           addr;
    InfoAccessType  accessType;
    CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd);

#ifdef FEATURE_READYTORUN
    if (call->gtEntryPoint.addr != nullptr)
    {
        accessType = call->gtEntryPoint.accessType;
        addr       = call->gtEntryPoint.addr;
    }
    else
#endif
        if (call->gtCallType == CT_HELPER)
    {
        noway_assert(helperNum != CORINFO_HELP_UNDEF);

        // the convention on getHelperFtn seems to be (it's not documented)
        // that it returns an address, or, if it returns null, pAddr is set to
        // another address, which requires an indirection
        void* pAddr;
        addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr);

        if (addr != nullptr)
        {
            assert(pAddr == nullptr);
            accessType = IAT_VALUE;
        }
        else
        {
            accessType = IAT_PVALUE;
            addr       = pAddr;
        }
    }
    else
    {
        noway_assert(helperNum == CORINFO_HELP_UNDEF);

        CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;

        if (call->IsSameThis())
        {
            aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
        }

        if (!call->NeedsNullCheck())
        {
            aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
        }

        CORINFO_CONST_LOOKUP addrInfo;
        comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags);

        accessType = addrInfo.accessType;
        addr       = addrInfo.addr;
    }

    GenTree* result = nullptr;
    switch (accessType)
    {
        case IAT_VALUE:
            // Non-virtual direct call to known address.
            // For JIT helper based tailcall (only used on x86) the target
            // address is passed as an arg to the helper so we want a node for
            // it.
            if (!IsCallTargetInRange(addr) || call->IsTailCallViaJitHelper())
            {
                result = AddrGen(addr);
            }
            else
            {
                // a direct call within range of hardware relative call instruction
                // stash the address for codegen
                call->gtDirectCallAddress = addr;
            }
            break;

        case IAT_PVALUE:
        {
            // If we are using an indirection cell for a direct call then apply
            // an optimization that loads the call target directly from the
            // indirection cell, instead of duplicating the tree.
            bool hasIndirectionCell = call->GetIndirectionCellArgKind() != NonStandardArgKind::None;

            if (!hasIndirectionCell)
            {
                // Non-virtual direct calls to addresses accessed by
                // a single indirection.
                GenTree* cellAddr = AddrGen(addr);
#ifdef DEBUG
                cellAddr->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
                GenTree* indir = Ind(cellAddr);
                result         = indir;
            }
            break;
        }

        case IAT_PPVALUE:
            // Non-virtual direct calls to addresses accessed by
            // a double indirection.
            //
            // Expanding an IAT_PPVALUE here will lose the opportunity
            // to Hoist/CSE the first indirection as it is an invariant load
            //
            assert(!"IAT_PPVALUE case in LowerDirectCall");

            noway_assert(helperNum == CORINFO_HELP_UNDEF);
            result = AddrGen(addr);
            // Double-indirection. Load the address into a register
            // and call indirectly through the register
            //
            result = Ind(Ind(result));
            break;

        case IAT_RELPVALUE:
        {
            // Non-virtual direct calls to addresses accessed by
            // a single relative indirection.
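            // The final target is computed as [addr] + addr: the cell at 'addr'
            // holds an offset relative to its own address rather than an absolute
            // pointer.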
            GenTree* cellAddr = AddrGen(addr);
            GenTree* indir    = Ind(cellAddr);
            result            = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr));
            break;
        }

        default:
            noway_assert(!"Bad accessType");
            break;
    }

    return result;
}

GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
{
    noway_assert(call->gtCallType == CT_USER_FUNC);

    assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) &
            (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL));

    GenTree* thisArgNode;
    if (call->IsTailCallViaJitHelper())
    {
        const unsigned argNum          = 0;
        fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum);
        thisArgNode                    = thisArgTabEntry->GetNode();
    }
    else
    {
        thisArgNode = comp->gtGetThisArg(call);
    }

    assert(thisArgNode != nullptr);
    assert(thisArgNode->gtOper == GT_PUTARG_REG);
    GenTree* thisExpr = thisArgNode->AsOp()->gtOp1;

    // We're going to use the 'this' expression multiple times, so make a local to copy it.

    GenTree* base;
    if (thisExpr->OperIs(GT_LCL_VAR))
    {
        base = comp->gtNewLclvNode(thisExpr->AsLclVar()->GetLclNum(), thisExpr->TypeGet());
    }
    else if (thisExpr->OperIs(GT_LCL_FLD))
    {
        base = comp->gtNewLclFldNode(thisExpr->AsLclFld()->GetLclNum(), thisExpr->TypeGet(),
                                     thisExpr->AsLclFld()->GetLclOffs());
    }
    else
    {
        unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call"));
        base                       = comp->gtNewLclvNode(delegateInvokeTmp, thisExpr->TypeGet());

        LIR::Use thisExprUse(BlockRange(), &thisArgNode->AsOp()->gtOp1, thisArgNode);
        ReplaceWithLclVar(thisExprUse, delegateInvokeTmp);

        thisExpr = thisExprUse.Def(); // it's changed; reload it.
    }

    // replace original expression feeding into thisPtr with
    // [originalThis + offsetOfDelegateInstance]

    GenTree* newThisAddr = new (comp, GT_LEA)
        GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance);

    GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr);

    BlockRange().InsertAfter(thisExpr, newThisAddr, newThis);

    thisArgNode->AsOp()->gtOp1 = newThis;
    ContainCheckIndir(newThis->AsIndir());

    // the control target is
    // [originalThis + firstTgtOffs]

    unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget;
    GenTree* result     = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs);
    GenTree* callTarget = Ind(result);

    // don't need to sequence and insert this tree, caller will do it

    return callTarget;
}

GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call)
{
#ifdef TARGET_X86
    if (call->gtCallCookie != nullptr)
    {
        NYI_X86("Morphing indirect non-virtual call with non-standard args");
    }
#endif

    // Indirect cookie calls get transformed by fgMorphArgs into indirect calls with non-standard args.
    // Hence we should never see this type of call in lower.

    noway_assert(call->gtCallCookie == nullptr);

    return nullptr;
}

//------------------------------------------------------------------------
// CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke
// epilogs to invoke a GC under a condition. The return trap checks some global
// location (the runtime tells us where that is and how many indirections to make),
// then, based on the result, conditionally calls a GC helper. We use a special node
// for this because at this time (late in the compilation phases), introducing flow
// is tedious/difficult.
//
// This is used for PInvoke inlining.
//
// Return Value:
//    Code tree to perform the action.
//
GenTree* Lowering::CreateReturnTrapSeq()
{
    // The GT_RETURNTRAP node expands to this:
    //    if (g_TrapReturningThreads)
    //    {
    //       RareDisablePreemptiveGC();
    //    }

    // The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'.

    void*    pAddrOfCaptureThreadGlobal = nullptr;
    int32_t* addrOfCaptureThreadGlobal =
        comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal);

    GenTree* testTree;
    if (addrOfCaptureThreadGlobal != nullptr)
    {
        testTree = AddrGen(addrOfCaptureThreadGlobal);
    }
    else
    {
        testTree = Ind(AddrGen(pAddrOfCaptureThreadGlobal));
    }
    return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, Ind(testTree, TYP_INT));
}

//------------------------------------------------------------------------
// SetGCState: Create a tree that stores the given constant (0 or 1) into the
// thread's GC state field.
//
// This is used for PInvoke inlining.
//
// Arguments:
//    state - constant (0 or 1) to store into the thread's GC state field.
//
// Return Value:
//    Code tree to perform the action.
//
GenTree* Lowering::SetGCState(int state)
{
    // Thread.offsetOfGcState = 0/1

    assert(state == 0 || state == 1);

    const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();

    GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);

    GenTree* stateNode    = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state);
    GenTree* addr         = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState);
    GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode);
    return storeGcState;
}

//------------------------------------------------------------------------
// CreateFrameLinkUpdate: Create a tree that either links or unlinks the
// locally-allocated InlinedCallFrame from the Frame list.
//
// This is used for PInvoke inlining.
//
// Arguments:
//    action - whether to link (push) or unlink (pop) the Frame
//
// Return Value:
//    Code tree to perform the action.
//
GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
{
    const CORINFO_EE_INFO*                       pInfo         = comp->eeGetEEInfo();
    const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;

    GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);

    // Thread->m_pFrame
    GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame);

    GenTree* data = nullptr;

    if (action == PushFrame)
    {
        // Thread->m_pFrame = &inlinedCallFrame;
        data = new (comp, GT_LCL_FLD_ADDR)
            GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
    }
    else
    {
        assert(action == PopFrame);
        // Thread->m_pFrame = inlinedCallFrame.m_pNext;

        data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar,
                                                    pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
    }
    GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data);
    return storeInd;
}

//------------------------------------------------------------------------
// InsertPInvokeMethodProlog: Create the code that runs at the start of
// every method that has PInvoke calls.
//
// Initialize the TCB local and the InlinedCallFrame object. Then link ("push")
// the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame
// is defined in vm/frames.h. See also vm/jitinterface.cpp for more information.
// The offsets of these fields are returned by the VM in a call to ICorStaticInfo::getEEInfo().
// // The (current) layout is as follows: // // 64-bit 32-bit CORINFO_EE_INFO // offset offset field name offset when set // ----------------------------------------------------------------------------------------- // +00h +00h GS cookie offsetOfGSCookie // +08h +04h vptr for class InlinedCallFrame offsetOfFrameVptr method prolog // +10h +08h m_Next offsetOfFrameLink method prolog // +18h +0Ch m_Datum offsetOfCallTarget call site // +20h n/a m_StubSecretArg not set by JIT // +28h +10h m_pCallSiteSP offsetOfCallSiteSP x86: call site, and zeroed in method // prolog; // non-x86: method prolog (SP remains // constant in function, after prolog: no // localloc and PInvoke in same function) // +30h +14h m_pCallerReturnAddress offsetOfReturnAddress call site // +38h +18h m_pCalleeSavedFP offsetOfCalleeSavedFP not set by JIT // +1Ch m_pThread // +20h m_pSPAfterProlog offsetOfSPAfterProlog arm only // +20/24h JIT retval spill area (int) before call_gc ??? // +24/28h JIT retval spill area (long) before call_gc ??? // +28/2Ch Saved value of EBP method prolog ??? // // Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points // to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before* // the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location, // and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie. // // Return Value: // none // void Lowering::InsertPInvokeMethodProlog() { noway_assert(comp->info.compUnmanagedCallCountWithGCTransition); noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); if (comp->opts.ShouldUsePInvokeHelpers()) { return; } JITDUMP("======= Inserting PInvoke method prolog\n"); // The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog. assert(comp->fgFirstBBisScratch()); LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB); const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo; // First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr #if defined(DEBUG) const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar); assert(inlinedPInvokeDsc->IsAddressExposed()); #endif // DEBUG GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr); // Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list: // TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg); // for x86, don't pass the secretArg. 
    CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_X86) || defined(TARGET_ARM)
    GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr);
#else
    GenTreeCall::Use*     argList = comp->gtNewCallArgs(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
#endif

    GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList);

    // some sanity checks on the frame list root vardsc
    const unsigned   lclNum = comp->info.compLvFrameListRoot;
    const LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
    noway_assert(!varDsc->lvIsParam);
    noway_assert(varDsc->lvType == TYP_I_IMPL);

    GenTree* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, lclNum);
    store->AsOp()->gtOp1 = call;
    store->gtFlags |= GTF_VAR_DEF;

    GenTree* const insertionPoint = firstBlockRange.FirstNonCatchArgNode();

    comp->fgMorphTree(store);
    firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store));
    DISPTREERANGE(firstBlockRange, store);

#if !defined(TARGET_X86) && !defined(TARGET_ARM)
    // For x86, this step is done at the call site (due to stack pointer not being static in the function).
    // For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.

    // --------------------------------------------------------
    // InlinedCallFrame.m_pCallSiteSP = @RSP;

    GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD)
        GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
    storeSP->gtOp1 = PhysReg(REG_SPBASE);
    storeSP->gtFlags |= GTF_VAR_DEF;
    assert(inlinedPInvokeDsc->lvDoNotEnregister);

    firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP));
    DISPTREERANGE(firstBlockRange, storeSP);
#endif // !defined(TARGET_X86) && !defined(TARGET_ARM)

#if !defined(TARGET_ARM)
    // For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.

    // --------------------------------------------------------
    // InlinedCallFrame.m_pCalleeSavedEBP = @RBP;

    GenTreeLclFld* storeFP =
        new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
                                                   callFrameInfo.offsetOfCalleeSavedFP);
    assert(inlinedPInvokeDsc->lvDoNotEnregister);

    storeFP->gtOp1 = PhysReg(REG_FPBASE);
    storeFP->gtFlags |= GTF_VAR_DEF;

    firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP));
    DISPTREERANGE(firstBlockRange, storeFP);
#endif // !defined(TARGET_ARM)

    // --------------------------------------------------------
    // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto
    // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
    if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
    {
        // Push a frame - if we are NOT in an IL stub, this is done right before the call
        // The init routine sets InlinedCallFrame's m_pNext, so we just set the thread's top-of-stack
        GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
        firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
        ContainCheckStoreIndir(frameUpd->AsStoreInd());
        DISPTREERANGE(firstBlockRange, frameUpd);
    }
#endif // TARGET_64BIT
}

//------------------------------------------------------------------------
// InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method
// that has PInvoke inlines. This needs to be inserted any place you can exit the
// function: returns, tailcalls and jmps.
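//
// The epilog mirrors the method prolog: where required it pops ("unlinks") the
// InlinedCallFrame that the prolog pushed onto the thread's Frame chain.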
//
// Arguments:
//    returnBB   -  basic block from which a method can return
//    lastExpr   -  GenTree of the last top level statement of returnBB (debug only arg)
//
// Return Value:
//    Code tree to perform the action.
//
void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr))
{
    assert(returnBB != nullptr);
    assert(comp->info.compUnmanagedCallCountWithGCTransition);

    if (comp->opts.ShouldUsePInvokeHelpers())
    {
        return;
    }

    JITDUMP("======= Inserting PInvoke method epilog\n");

    // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls.
    assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) ||
           returnBB->endsWithTailCallOrJmp(comp));

    LIR::Range& returnBlockRange = LIR::AsRange(returnBB);

    GenTree* insertionPoint = returnBlockRange.LastNode();
    assert(insertionPoint == lastExpr);

    // Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in
    // execution order so that it is guaranteed that there will be no further PInvokes after that point in the method.
    //
    // Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN.  After inserting PME, execution order would
    // be Op1, PME, GT_RETURN
    //
    // Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be
    // arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL
    // After inserting PME execution order would be:
    // arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL
    //
    // Example3: GT_JMP.  After inserting PME execution order would be: PME, GT_JMP
    // That is after PME, args for GT_JMP call will be set up.

    // Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do
    // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
    if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
#endif // TARGET_64BIT
    {
        GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame);
        returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
        ContainCheckStoreIndir(frameUpd->AsStoreInd());
    }
}

//------------------------------------------------------------------------
// InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code.
// It does all the necessary call-site setup of the InlinedCallFrame.
//
// Arguments:
//    call - the call for which we are inserting the PInvoke prolog.
//
// Return Value:
//    None.
//
void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
{
    JITDUMP("======= Inserting PInvoke call prolog\n");

    GenTree* insertBefore = call;
    if (call->gtCallType == CT_INDIRECT)
    {
        bool isClosed;
        insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
        assert(isClosed);
    }

    const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;

    gtCallTypes callType = (gtCallTypes)call->gtCallType;

    noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);

    if (comp->opts.ShouldUsePInvokeHelpers())
    {
        // First argument is the address of the frame variable.
        GenTree* frameAddr =
            new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);

#if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
        // On x86 targets, PInvoke calls need the size of the stack args in InlinedCallFrame.m_Datum.
        // This is because the callee pops stack arguments, and we need to keep track of this during stack
        // walking.
        const unsigned    numStkArgBytes = call->fgArgInfo->GetNextSlotByteOffset();
        GenTree*          stackBytes     = comp->gtNewIconNode(numStkArgBytes, TYP_INT);
        GenTreeCall::Use* args           = comp->gtNewCallArgs(frameAddr, stackBytes);
#else
        GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr);
#endif

        // Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN
        GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, args);

        comp->fgMorphTree(helperCall);
        BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall));
        LowerNode(helperCall); // helper call is inserted before current node and should be lowered here.
        return;
    }

    // Emit the following sequence:
    //
    // InlinedCallFrame.callTarget = methodHandle   // stored in m_Datum
    // InlinedCallFrame.m_pCallSiteSP = SP          // x86 only
    // InlinedCallFrame.m_pCallerReturnAddress = return address
    // GT_START_PREEMPTGC
    // Thread.gcState = 0
    // (non-stub) - update top Frame on TCB         // 64-bit targets only

    // ----------------------------------------------------------------------------------
    // Setup InlinedCallFrame.callSiteTarget (which is how the JIT refers to it).
    // The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings.

    GenTree* src = nullptr;

    if (callType == CT_INDIRECT)
    {
#if !defined(TARGET_64BIT)
        // On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum.
        const unsigned stackByteOffset = call->fgArgInfo->GetNextSlotByteOffset();
        src                            = comp->gtNewIconNode(stackByteOffset, TYP_INT);
#else
        // On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum.
        // If the stub parameter value is not needed, m_Datum will be initialized by the VM.
        if (comp->info.compPublishStubParam)
        {
            src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL);
        }
#endif // !defined(TARGET_64BIT)
    }
    else
    {
        assert(callType == CT_USER_FUNC);

        void*                 pEmbedMethodHandle = nullptr;
        CORINFO_METHOD_HANDLE embedMethodHandle =
            comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle);

        noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle));

        if (embedMethodHandle != nullptr)
        {
            // InlinedCallFrame.callSiteTarget = methodHandle
            src = AddrGen(embedMethodHandle);
        }
        else
        {
            // InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle
            src = Ind(AddrGen(pEmbedMethodHandle));
        }
    }

    if (src != nullptr)
    {
        // Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
GenTreeLclFld* store = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallTarget); store->gtOp1 = src; store->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, store); } #ifdef TARGET_X86 // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallSiteSP = SP GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP); storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE); storeCallSiteSP->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP); #endif // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call) GenTreeLclFld* storeLab = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfReturnAddress); storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); storeLab->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeLab); // Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method // contains PInvokes; on 64-bit targets this is necessary in non-stubs. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Set the TCB's frame to be the one we just created. // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME) // has prepended it to the linked list to maintain the stack of Frames. // // Stubs do this once per stub, not once per call. GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); } #endif // TARGET_64BIT // IMPORTANT **** This instruction must be the last real instruction **** // It changes the thread's state to Preemptive mode // ---------------------------------------------------------------------------------- // [tcb + offsetOfGcState] = 0 GenTree* storeGCState = SetGCState(0); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState)); ContainCheckStoreIndir(storeGCState->AsStoreInd()); // Indicate that codegen has switched this thread to preemptive GC. // This tree node doesn't generate any code, but impacts LSRA and gc reporting. // This tree node is simple so doesn't require sequencing. GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID); BlockRange().InsertBefore(insertBefore, preemptiveGCNode); } //------------------------------------------------------------------------ // InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call. // // Arguments: // call - the call for which we are inserting the PInvoke epilog. // // Return Value: // None. // void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call) { JITDUMP("======= Inserting PInvoke call epilog\n"); if (comp->opts.ShouldUsePInvokeHelpers()) { noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); // First argument is the address of the frame variable. 
        GenTree* frameAddr = comp->gtNewLclVarAddrNode(comp->lvaInlinedPInvokeFrameVar, TYP_BYREF);

#if defined(DEBUG)
        const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar);
        assert(inlinedPInvokeDsc->IsAddressExposed());
#endif // DEBUG

        // Insert call to CORINFO_HELP_JIT_PINVOKE_END
        GenTreeCall* helperCall =
            comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewCallArgs(frameAddr));

        comp->fgMorphTree(helperCall);
        BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall));
        ContainCheckCallOperands(helperCall);
        return;
    }

    // gcstate = 1
    GenTree* insertionPoint = call->gtNext;

    GenTree* tree = SetGCState(1);
    BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
    ContainCheckStoreIndir(tree->AsStoreInd());

    tree = CreateReturnTrapSeq();
    BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
    ContainCheckReturnTrap(tree->AsOp());

    // Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets this
    // happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive.
    CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_64BIT
    if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
    {
        tree = CreateFrameLinkUpdate(PopFrame);
        BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
        ContainCheckStoreIndir(tree->AsStoreInd());
    }
#else
    const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;

    // ----------------------------------------------------------------------------------
    // InlinedCallFrame.m_pCallerReturnAddress = nullptr

    GenTreeLclFld* const storeCallSiteTracker =
        new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
                                                   callFrameInfo.offsetOfReturnAddress);

    GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);

    storeCallSiteTracker->gtOp1 = constantZero;
    storeCallSiteTracker->gtFlags |= GTF_VAR_DEF;

    BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker);
    ContainCheckStoreLoc(storeCallSiteTracker);
#endif // TARGET_64BIT
}

//------------------------------------------------------------------------
// LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call
//
// Arguments:
//    call - The call to lower.
//
// Return Value:
//    The lowered call tree.
//
GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
{
    // PInvoke lowering varies depending on the flags passed in by the EE. By default,
    // GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified,
    // GC transitions are instead performed using helper calls. Examples of each case are given
    // below. Note that the data structure that is used to store information about a call frame
    // containing any P/Invoke calls is initialized in the method prolog (see
    // InsertPInvokeMethod{Prolog,Epilog} for details).
    //
    // Inline transitions:
    //     InlinedCallFrame inlinedCallFrame;
    //
    //     ...
    //
    //     // Set up frame information
    //     inlinedCallFrame.callTarget = methodHandle;      // stored in m_Datum
    //     inlinedCallFrame.m_pCallSiteSP = SP;             // x86 only
    //     inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the
    //     call)
    //     Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only)
    //
    //     // Switch the thread's GC mode to preemptive mode
    //     thread->m_fPreemptiveGCDisabled = 0;
    //
    //     // Call the unmanaged method
    //     target();
    //
    //     // Switch the thread's GC mode back to cooperative mode
    //     thread->m_fPreemptiveGCDisabled = 1;
    //
    //     // Rendezvous with a running collection if necessary
    //     if (g_TrapReturningThreads)
    //         RareDisablePreemptiveGC();
    //
    // Transitions using helpers:
    //
    //     OpaqueFrame opaqueFrame;
    //
    //     ...
    //
    //     // Call the JIT_PINVOKE_BEGIN helper
    //     JIT_PINVOKE_BEGIN(&opaqueFrame);
    //
    //     // Call the unmanaged method
    //     target();
    //
    //     // Call the JIT_PINVOKE_END helper
    //     JIT_PINVOKE_END(&opaqueFrame);
    //
    // Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target
    // platform. They may be changed in the future such that they preserve all register values.

    GenTree* result = nullptr;

    // All code generated by this function must not contain the randomly-inserted NOPs
    // that we insert to inhibit JIT spraying in partial trust scenarios.
    // The PINVOKE_PROLOG op signals this to the code generator/emitter.

    GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID);
    BlockRange().InsertBefore(call, prolog);

    bool addPInvokePrologEpilog = !call->IsSuppressGCTransition();
    if (addPInvokePrologEpilog)
    {
        InsertPInvokeCallProlog(call);
    }

    if (call->gtCallType != CT_INDIRECT)
    {
        noway_assert(call->gtCallType == CT_USER_FUNC);
        CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;

        CORINFO_CONST_LOOKUP lookup;
        comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);

        void*    addr = lookup.addr;
        GenTree* addrTree;
        switch (lookup.accessType)
        {
            case IAT_VALUE:
                // IsCallTargetInRange always returns true on x64. It wants to use rip-based addressing
                // for this call. Unfortunately, in case of pinvokes (+suppressgctransition) to external libs
                // (e.g. kernel32.dll) the relative offset is unlikely to fit into int32 and we will have to
                // turn fAllowRel32 off globally.
                if ((call->IsSuppressGCTransition() && !comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) ||
                    !IsCallTargetInRange(addr))
                {
                    result = AddrGen(addr);
                }
                else
                {
                    // a direct call within range of hardware relative call instruction
                    // stash the address for codegen
                    call->gtDirectCallAddress = addr;
#ifdef FEATURE_READYTORUN
                    call->gtEntryPoint.addr       = nullptr;
                    call->gtEntryPoint.accessType = IAT_VALUE;
#endif
                }
                break;

            case IAT_PVALUE:
                addrTree = AddrGen(addr);
#ifdef DEBUG
                addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
                result = Ind(addrTree);
                break;

            case IAT_PPVALUE:
                // ToDo:  Expanding an IAT_PPVALUE here loses the opportunity
                // to Hoist/CSE the first indirection as it is an invariant load
                //
                // This case currently occurs when we make PInvoke calls in crossgen
                //
                // assert(!"IAT_PPVALUE in Lowering::LowerNonvirtPinvokeCall");

                addrTree = AddrGen(addr);
#ifdef DEBUG
                addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
                // Double-indirection. Load the address into a register
                // and call indirectly through the register
                //
                result = Ind(Ind(addrTree));
                break;

            case IAT_RELPVALUE:
                unreached();
        }
    }

    if (addPInvokePrologEpilog)
    {
        InsertPInvokeCallEpilog(call);
    }

    return result;
}

// Expand the code necessary to calculate the control target.
// Returns: the expression needed to calculate the control target
// May insert embedded statements
GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
{
    noway_assert(call->gtCallType == CT_USER_FUNC);

    regNumber thisPtrArgReg = comp->codeGen->genGetThisArgReg(call);

    // get a reference to the thisPtr being passed
    fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, 0);
    assert(argEntry->GetRegNum() == thisPtrArgReg);
    assert(argEntry->GetNode()->OperIs(GT_PUTARG_REG));
    GenTree* thisPtr = argEntry->GetNode()->AsUnOp()->gtGetOp1();

    // If what we are passing as the thisptr is not already a local, make a new local to place it in
    // because we will be creating expressions based on it.
    unsigned lclNum;
    if (thisPtr->OperIsLocal())
    {
        lclNum = thisPtr->AsLclVarCommon()->GetLclNum();
    }
    else
    {
        // Split off the thisPtr and store to a temporary variable.
        if (vtableCallTemp == BAD_VAR_NUM)
        {
            vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call"));
        }

        LIR::Use thisPtrUse(BlockRange(), &(argEntry->GetNode()->AsUnOp()->gtOp1), argEntry->GetNode());
        ReplaceWithLclVar(thisPtrUse, vtableCallTemp);

        lclNum = vtableCallTemp;
    }

    // Get hold of the vtable offset (note: this might be expensive)
    unsigned vtabOffsOfIndirection;
    unsigned vtabOffsAfterIndirection;
    bool     isRelative;
    comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
                                                  &vtabOffsAfterIndirection, &isRelative);

    // If the thisPtr is a local field, then construct a local field type node
    GenTree* local;
    if (thisPtr->isLclField())
    {
        local = new (comp, GT_LCL_FLD)
            GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->GetLclOffs());
    }
    else
    {
        local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum);
    }

    // pointer to virtual table = [REG_CALL_THIS + offs]
    GenTree* result = Ind(Offset(local, VPTR_OFFS));

    // Get the appropriate vtable chunk
    if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
    {
        if (isRelative)
        {
            // MethodTable offset is a relative pointer.
            //
            // An additional temporary variable is used to store the virtual table pointer.
            // The address of the method is obtained by the following computations:
            //
            // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
            // vtable-1st-level-indirection):
            // tmp = vtab
            //
            // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
            // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
            //
            //
            // If relative pointers are also in second level indirection, an additional temporary is used:
            // tmp1 = vtab
            // tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection]
            // result = tmp2 + [tmp2]
            //
            unsigned lclNumTmp  = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp"));
            unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2"));

            GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result);

            GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet());
            tmpTree          = Offset(tmpTree, vtabOffsOfIndirection);

            tmpTree       = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false);
            GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT);
            result        = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs);

            GenTree* base           = OffsetByIndexWithScale(result, tmpTree, 1);
            GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base);

            LIR::Range range = LIR::SeqTree(comp, lclvNodeStore);
            JITDUMP("result of obtaining pointer to virtual table:\n");
            DISPRANGE(range);
            BlockRange().InsertBefore(call, std::move(range));

            LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2);
            ContainCheckIndir(tmpTree->AsIndir());
            JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n");
            DISPRANGE(range2);
            BlockRange().InsertAfter(lclvNodeStore, std::move(range2));

            result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
            result =
                comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
        }
        else
        {
            // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
            result = Ind(Offset(result, vtabOffsOfIndirection));
        }
    }
    else
    {
        assert(!isRelative);
    }

    // Load the function address
    // result = [reg+vtabOffs]
    if (!isRelative)
    {
        result = Ind(Offset(result, vtabOffsAfterIndirection));
    }

    return result;
}

// Lower stub dispatched virtual calls.
GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
{
    assert(call->IsVirtualStub());

    // An x86 JIT which uses full stub dispatch must generate only
    // the following stub dispatch calls:
    //
    // (1) isCallRelativeIndirect:
    //        call dword ptr [rel32]  ;  FF 15 ---rel32----
    // (2) isCallRelative:
    //        call abc                ;     E8 ---rel32----
    // (3) isCallRegisterIndirect:
    //     3-byte nop                 ;
    //     call dword ptr [eax]       ;     FF 10
    //
    // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
    // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.

    GenTree* result = nullptr;

    // This is code to set up an indirect call to a stub address computed
    // via dictionary lookup.
    if (call->gtCallType == CT_INDIRECT)
    {
        // The importer decided we needed a stub call via a computed
        // stub dispatch address, i.e. an address which came from a dictionary lookup.
        //   - The dictionary lookup produces an indirected address, suitable for call
        //     via "call [VirtualStubParam.reg]"
        //
        // This combination will only be generated for shared generic code and when
        // stub dispatch is active.

        // fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg.
        // All we have to do here is add an indirection to generate the actual call target.
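        // The indirection is forced to keep its address in a register
        // (GTF_IND_REQ_ADDR_IN_REG below) so the emitted call is register
        // indirect, matching the isCallRegisterIndirect predicate noted above.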
GenTree* ind = Ind(call->gtCallAddr); BlockRange().InsertAfter(call->gtCallAddr, ind); call->gtCallAddr = ind; ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG; ContainCheckIndir(ind->AsIndir()); } else { // Direct stub call. // Get stub addr. This will return NULL if virtual call stubs are not active void* stubAddr = call->gtStubCallStubAddr; noway_assert(stubAddr != nullptr); // If not CT_INDIRECT, then it should always be relative indir call. // This is ensured by VM. noway_assert(call->IsVirtualStubRelativeIndir()); // Direct stub calls, though the stubAddr itself may still need to be // accessed via an indirection. GenTree* addr = AddrGen(stubAddr); // On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as // the target address, and we set a flag that it's a VSD call. The helper then // handles any necessary indirection. if (call->IsTailCallViaJitHelper()) { result = addr; } else { bool shouldOptimizeVirtualStubCall = false; #if defined(TARGET_ARMARCH) || defined(TARGET_AMD64) // Skip inserting the indirection node to load the address that is already // computed in the VSD stub arg register as a hidden parameter. Instead during the // codegen, just load the call target from there. shouldOptimizeVirtualStubCall = !comp->opts.IsCFGEnabled(); #endif if (!shouldOptimizeVirtualStubCall) { result = Ind(addr); } } } // TODO-Cleanup: start emitting random NOPS return result; } //------------------------------------------------------------------------ // Lowering::AreSourcesPossibleModifiedLocals: // Given two nodes which will be used in an addressing mode (base, // index), check to see if they are lclVar reads, and if so, walk // backwards from the use until both reads have been visited to // determine if they are potentially modified in that range. // // Arguments: // addr - the node that uses the base and index nodes // base - the base node // index - the index node // // Returns: true if either the base or index may be modified between the // node and addr. // bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index) { assert(addr != nullptr); SideEffectSet baseSideEffects; if (base != nullptr) { if (base->OperIsLocalRead()) { baseSideEffects.AddNode(comp, base); } else { base = nullptr; } } SideEffectSet indexSideEffects; if (index != nullptr) { if (index->OperIsLocalRead()) { indexSideEffects.AddNode(comp, index); } else { index = nullptr; } } for (GenTree* cursor = addr;; cursor = cursor->gtPrev) { assert(cursor != nullptr); if (cursor == base) { base = nullptr; } if (cursor == index) { index = nullptr; } if ((base == nullptr) && (index == nullptr)) { return false; } m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, cursor); if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false)) { return true; } if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false)) { return true; } } } //------------------------------------------------------------------------ // TryCreateAddrMode: recognize trees which can be implemented using an // addressing mode and transform them to a GT_LEA // // Arguments: // addr - the use of the address we want to transform // isContainable - true if this addressing mode can be contained // parent - the node that consumes the given addr (most likely it's an IND) // // Returns: // true if the address node was changed to a LEA, false otherwise. 
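//
// Example (illustrative, not from an actual dump): for an address computed as
//     ADD(lclVar p, LSH(lclVar i, 3))
// i.e. "p + i * 8", genCreateAddrMode can recognize
//     base = p, index = i, scale = 8, offset = 0
// and the ADD is rewritten in place to LEA(p, i, 8, 0).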
// bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent) { if (!addr->OperIs(GT_ADD) || addr->gtOverflow()) { return false; } #ifdef TARGET_ARM64 if (parent->OperIsIndir() && parent->AsIndir()->IsVolatile() && !varTypeIsGC(addr)) { // For Arm64 we avoid using LEA for volatile INDs // because we won't be able to use ldar/star return false; } #endif GenTree* base = nullptr; GenTree* index = nullptr; unsigned scale = 0; ssize_t offset = 0; bool rev = false; // Find out if an addressing mode can be constructed bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address true, // fold &rev, // reverse ops &base, // base addr &index, // index val &scale, // scaling &offset); // displacement var_types targetType = parent->OperIsIndir() ? parent->TypeGet() : TYP_UNDEF; #ifdef TARGET_ARMARCH // Multiplier should be a "natural-scale" power of two number which is equal to target's width. // // *(ulong*)(data + index * 8); - can be optimized // *(ulong*)(data + index * 7); - can not be optimized // *(int*)(data + index * 2); - can not be optimized // if ((scale > 0) && (genTypeSize(targetType) != scale)) { return false; } #endif if (scale == 0) { scale = 1; } if (!isContainable) { // this is just a reg-const add if (index == nullptr) { return false; } // this is just a reg-reg add if ((scale == 1) && (offset == 0)) { return false; } } // make sure there are not any side effects between def of leaves and use if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index)) { JITDUMP("No addressing mode:\n "); DISPNODE(addr); return false; } JITDUMP("Addressing mode:\n"); JITDUMP(" Base\n "); DISPNODE(base); if (index != nullptr) { JITDUMP(" + Index * %u + %d\n ", scale, offset); DISPNODE(index); } else { JITDUMP(" + %d\n", offset); } // Save the (potentially) unused operands before changing the address to LEA. ArrayStack<GenTree*> unusedStack(comp->getAllocator(CMK_ArrayStack)); unusedStack.Push(addr->AsOp()->gtGetOp1()); unusedStack.Push(addr->AsOp()->gtGetOp2()); addr->ChangeOper(GT_LEA); // Make sure there are no leftover side effects (though the existing ADD we're // changing shouldn't have any at this point, but sometimes it does). addr->gtFlags &= ~GTF_ALL_EFFECT; GenTreeAddrMode* addrMode = addr->AsAddrMode(); addrMode->SetBase(base); addrMode->SetIndex(index); addrMode->SetScale(scale); addrMode->SetOffset(static_cast<int>(offset)); // Neither the base nor the index should now be contained. if (base != nullptr) { base->ClearContained(); } if (index != nullptr) { index->ClearContained(); } // Remove all the nodes that are no longer used. while (!unusedStack.Empty()) { GenTree* unused = unusedStack.Pop(); // Use a loop to process some of the nodes iteratively // instead of pushing them on the stack. while ((unused != base) && (unused != index)) { JITDUMP("Removing unused node:\n "); DISPNODE(unused); BlockRange().Remove(unused); if (unused->OperIs(GT_ADD, GT_MUL, GT_LSH)) { // Push the first operand and loop back to process the second one. // This minimizes the stack depth because the second one tends to be // a constant so it gets processed and then the first one gets popped. unusedStack.Push(unused->AsOp()->gtGetOp1()); unused = unused->AsOp()->gtGetOp2(); } else { assert(unused->OperIs(GT_CNS_INT)); break; } } } #ifdef TARGET_ARM64 // Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store. 
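    // For example (assumed source pattern, not an actual dump):
    //     *(ulong*)(data + (uint)index * 8)
    // yields index = BFIZ(CAST(long <- int), 3); containing it lets codegen emit a single
    // "ldr x0, [base, w1, uxtw #3]" style load instead of a separate extend and shift.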
if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) && index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType))) { // BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT GenTreeCast* cast = index->gtGetOp1()->AsCast(); assert(cast->isContained()); const unsigned shiftBy = (unsigned)index->gtGetOp2()->AsIntCon()->IconValue(); // 'scale' and 'offset' have to be unset since we're going to use [base + index * SXTW/UXTW scale] form // where there is no room for additional offsets/scales on ARM64. 'shiftBy' has to match target's width. if (cast->CastOp()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && (genTypeSize(targetType) == (1U << shiftBy)) && (scale == 1) && (offset == 0)) { // TODO: Make sure that genCreateAddrMode marks such BFIZ candidates as GTF_DONT_CSE for better CQ. MakeSrcContained(addrMode, index); } } #endif JITDUMP("New addressing mode node:\n "); DISPNODE(addrMode); JITDUMP("\n"); return true; } //------------------------------------------------------------------------ // LowerAdd: turn this add into a GT_LEA if that would be profitable // // Arguments: // node - the node we care about // // Returns: // nullptr if no transformation was done, or the next node in the transformed node sequence that // needs to be lowered. // GenTree* Lowering::LowerAdd(GenTreeOp* node) { if (varTypeIsIntegralOrI(node->TypeGet())) { GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2(); LIR::Use use; // It is not the best place to do such simple arithmetic optimizations, // but it allows us to avoid `LEA(addr, 0)` nodes and doing that in morph // requires more changes. Delete that part if we get an expression optimizer. if (op2->IsIntegralConst(0)) { JITDUMP("Lower: optimize val + 0: "); DISPNODE(node); JITDUMP("Replaced with: "); DISPNODE(op1); if (BlockRange().TryGetUse(node, &use)) { use.ReplaceWith(op1); } else { op1->SetUnusedValue(); } GenTree* next = node->gtNext; BlockRange().Remove(op2); BlockRange().Remove(node); JITDUMP("Remove [%06u], [%06u]\n", op2->gtTreeID, node->gtTreeID); return next; } #ifndef TARGET_ARMARCH if (BlockRange().TryGetUse(node, &use)) { // If this is a child of an indir, let the parent handle it. // If there is a chain of adds, only look at the topmost one. GenTree* parent = use.User(); if (!parent->OperIsIndir() && !parent->OperIs(GT_ADD)) { TryCreateAddrMode(node, false, parent); } } #endif // !TARGET_ARMARCH } if (node->OperIs(GT_ADD)) { ContainCheckBinary(node); } return nullptr; } //------------------------------------------------------------------------ // LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node. // // Arguments: // divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered // // Return Value: // Returns a boolean indicating whether the node was transformed. 
// // Notes: // - Transform UDIV/UMOD by power of 2 into RSZ/AND // - Transform UDIV by constant >= 2^(N-1) into GE // - Transform UDIV/UMOD by constant >= 3 into "magic division" // bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) { assert(divMod->OperIs(GT_UDIV, GT_UMOD)); #if defined(USE_HELPERS_FOR_INT_DIV) if (!varTypeIsIntegral(divMod->TypeGet())) { assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls"); } assert(varTypeIsFloating(divMod->TypeGet())); #endif // USE_HELPERS_FOR_INT_DIV #if defined(TARGET_ARM64) assert(divMod->OperGet() != GT_UMOD); #endif // TARGET_ARM64 GenTree* dividend = divMod->gtGetOp1(); GenTree* divisor = divMod->gtGetOp2(); #if !defined(TARGET_64BIT) if (dividend->OperIs(GT_LONG)) { return false; } #endif if (!divisor->IsCnsIntOrI()) { return false; } if (dividend->IsCnsIntOrI()) { // We shouldn't see a divmod with constant operands here but if we do then it's likely // because optimizations are disabled or it's a case that's supposed to throw an exception. // Don't optimize this. return false; } const var_types type = divMod->TypeGet(); assert((type == TYP_INT) || (type == TYP_I_IMPL)); size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue()); if (type == TYP_INT) { // Clear up the upper 32 bits of the value, they may be set to 1 because constants // are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets. divisorValue &= UINT32_MAX; } if (divisorValue == 0) { return false; } const bool isDiv = divMod->OperIs(GT_UDIV); if (isPow2(divisorValue)) { genTreeOps newOper; if (isDiv) { newOper = GT_RSZ; divisorValue = genLog2(divisorValue); } else { newOper = GT_AND; divisorValue -= 1; } divMod->SetOper(newOper); divisor->AsIntCon()->SetIconValue(divisorValue); ContainCheckNode(divMod); return true; } if (isDiv) { // If the divisor is greater or equal than 2^(N - 1) then the result is 1 // iff the dividend is greater or equal than the divisor. if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) || ((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2)))) { divMod->SetOper(GT_GE); divMod->gtFlags |= GTF_UNSIGNED; ContainCheckNode(divMod); return true; } } // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) if (!comp->opts.MinOpts() && (divisorValue >= 3)) { size_t magic; bool increment; int preShift; int postShift; bool simpleMul = false; unsigned bits = type == TYP_INT ? 
                            32 : 64;
        // if the dividend operand is AND or RSZ with a constant then the number of input bits can be reduced
        if (dividend->OperIs(GT_AND) && dividend->gtGetOp2()->IsCnsIntOrI())
        {
            size_t maskCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
            if (maskCns != 0)
            {
                unsigned maskBits = 1;
                while (maskCns >>= 1)
                    maskBits++;
                if (maskBits < bits)
                    bits = maskBits;
            }
        }
        else if (dividend->OperIs(GT_RSZ) && dividend->gtGetOp2()->IsCnsIntOrI())
        {
            size_t shiftCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
            if (shiftCns < bits)
            {
                bits -= static_cast<unsigned>(shiftCns);
            }
        }

        if (type == TYP_INT)
        {
            magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &increment, &preShift,
                                                    &postShift, bits);

#ifdef TARGET_64BIT
            // avoid inc_saturate/multiple shifts by widening to 32x64 MULHI
            if (increment || (preShift
#ifdef TARGET_XARCH
                              // IMUL reg,reg,imm32 can't be used if magic<0 because of sign-extension
                              && static_cast<int32_t>(magic) < 0
#endif
                              ))
            {
                magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
                                                        &postShift, bits);
            }
            // otherwise just widen to regular multiplication
            else
            {
                postShift += 32;
                simpleMul = true;
            }
#endif
        }
        else
        {
#ifdef TARGET_64BIT
            magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
                                                    &postShift, bits);
#else
            unreached();
#endif
        }
        assert(divMod->MarkedDivideByConstOptimized());

        const bool     requiresDividendMultiuse = !isDiv;
        const weight_t curBBWeight              = m_block->getBBWeight(comp);

        if (requiresDividendMultiuse)
        {
            LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod);
            dividend = ReplaceWithLclVar(dividendUse);
        }

        GenTree* firstNode        = nullptr;
        GenTree* adjustedDividend = dividend;

#ifdef TARGET_ARM64
        // On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one.
        bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul;
#else
        CLANG_FORMAT_COMMENT_ANCHOR;
        bool widenToNativeIntForMul = (type != TYP_I_IMPL);
#endif

        // If the "increment" flag is returned by GetUnsignedMagic we need to do a Saturating Increment first
        if (increment)
        {
            adjustedDividend = comp->gtNewOperNode(GT_INC_SATURATE, type, adjustedDividend);
            BlockRange().InsertBefore(divMod, adjustedDividend);
            firstNode = adjustedDividend;
            assert(!preShift);
        }
        // if "preShift" is required, then do a right shift before
        else if (preShift)
        {
            GenTree* preShiftBy = comp->gtNewIconNode(preShift, TYP_INT);
            adjustedDividend    = comp->gtNewOperNode(GT_RSZ, type, adjustedDividend, preShiftBy);
            BlockRange().InsertBefore(divMod, preShiftBy, adjustedDividend);
            firstNode = preShiftBy;
        }
        else if (widenToNativeIntForMul)
        {
            adjustedDividend = comp->gtNewCastNode(TYP_I_IMPL, adjustedDividend, true, TYP_I_IMPL);
            BlockRange().InsertBefore(divMod, adjustedDividend);
            firstNode = adjustedDividend;
        }

#ifdef TARGET_XARCH
        // force input transformation to RAX because the following MULHI will kill RDX:RAX anyway and LSRA often
        // causes redundant copies otherwise
        if (firstNode && !simpleMul)
        {
            adjustedDividend->SetRegNum(REG_RAX);
        }
#endif

        if (widenToNativeIntForMul)
        {
            divisor->gtType = TYP_I_IMPL;
        }
        divisor->AsIntCon()->SetIconValue(magic);

        if (isDiv && !postShift && (type == TYP_I_IMPL))
        {
            divMod->SetOper(GT_MULHI);
            divMod->gtOp1 = adjustedDividend;
            divMod->SetUnsigned();
        }
        else
        {
#ifdef TARGET_ARM64
            // 64-bit MUL is more expensive than UMULL on ARM64.
            genTreeOps mulOper = simpleMul ? GT_MUL_LONG : GT_MULHI;
#else
            // 64-bit IMUL is less expensive than MUL eax:edx on x64.
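            // Worked example (illustrative): for a TYP_INT dividend and divisorValue == 3,
            // GetUnsigned32Magic yields magic == 0xAAAAAAAB with postShift == 1; after the
            // "postShift += 32" widening above, the simpleMul path computes
            //     x / 3 == (uint32_t)(((uint64_t)x * 0xAAAAAAAB) >> 33)
            // i.e. one widening multiply followed by one right shift.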
            genTreeOps mulOper = simpleMul ? GT_MUL : GT_MULHI;
#endif
            // Insert a new multiplication node before the existing GT_UDIV/GT_UMOD node.
            // The existing node will later be transformed into a GT_RSZ/GT_SUB that
            // computes the final result. This way we don't need to find and change the use
            // of the existing node.
            GenTree* mulhi = comp->gtNewOperNode(mulOper, TYP_I_IMPL, adjustedDividend, divisor);
            mulhi->SetUnsigned();
            BlockRange().InsertBefore(divMod, mulhi);
            if (firstNode == nullptr)
            {
                firstNode = mulhi;
            }

            if (postShift)
            {
                GenTree* shiftBy = comp->gtNewIconNode(postShift, TYP_INT);
                BlockRange().InsertBefore(divMod, shiftBy);

                if (isDiv && (type == TYP_I_IMPL))
                {
                    divMod->SetOper(GT_RSZ);
                    divMod->gtOp1 = mulhi;
                    divMod->gtOp2 = shiftBy;
                }
                else
                {
                    mulhi = comp->gtNewOperNode(GT_RSZ, TYP_I_IMPL, mulhi, shiftBy);
                    BlockRange().InsertBefore(divMod, mulhi);
                }
            }

            if (!isDiv)
            {
                // dividend UMOD divisor = dividend SUB (div MUL divisor)
                GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
                GenTree* mul     = comp->gtNewOperNode(GT_MUL, type, mulhi, divisor);
                dividend         = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());

                divMod->SetOper(GT_SUB);
                divMod->gtOp1 = dividend;
                divMod->gtOp2 = mul;

                BlockRange().InsertBefore(divMod, divisor, mul, dividend);
            }
            else if (type != TYP_I_IMPL)
            {
#ifdef TARGET_ARMARCH
                divMod->SetOper(GT_CAST);
                divMod->SetUnsigned();
                divMod->AsCast()->gtCastType = TYP_INT;
#else
                divMod->SetOper(GT_BITCAST);
#endif
                divMod->gtOp1 = mulhi;
                divMod->gtOp2 = nullptr;
            }
        }

        if (firstNode != nullptr)
        {
            ContainCheckRange(firstNode, divMod);
        }

        return true;
    }
#endif
    return false;
}

//------------------------------------------------------------------------
// LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a constant
// divisor into equivalent but faster sequences.
//
// Arguments:
//    node - pointer to the DIV or MOD node
//
// Returns:
//    nullptr if no transformation is done, or the next node in the transformed node sequence that
//    needs to be lowered.
//
GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node)
{
    assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));

    GenTree* divMod   = node;
    GenTree* dividend = divMod->gtGetOp1();
    GenTree* divisor  = divMod->gtGetOp2();

    const var_types type = divMod->TypeGet();
    assert((type == TYP_INT) || (type == TYP_LONG));

#if defined(USE_HELPERS_FOR_INT_DIV)
    assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls");
#endif // USE_HELPERS_FOR_INT_DIV
#if defined(TARGET_ARM64)
    assert(node->OperGet() != GT_MOD);
#endif // TARGET_ARM64

    if (!divisor->IsCnsIntOrI())
    {
        return nullptr; // no transformations to make
    }

    if (dividend->IsCnsIntOrI())
    {
        // We shouldn't see a divmod with constant operands here but if we do then it's likely
        // because optimizations are disabled or it's a case that's supposed to throw an exception.
        // Don't optimize this.
        return nullptr;
    }

    ssize_t divisorValue = divisor->AsIntCon()->IconValue();

    if (divisorValue == -1 || divisorValue == 0)
    {
        // x / 0 and x % 0 can't be optimized because they are required to throw an exception.

        // x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.

        // x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is
        // the minimum representable integer. However, the C# spec says that an exception "is" thrown in this
        // case so optimizing this case would break C# code.

        // A runtime check could be used to handle this case but it's probably too rare to matter.
        return nullptr;
    }

    bool isDiv = divMod->OperGet() == GT_DIV;

    if (isDiv)
    {
        if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN))
        {
            // If the divisor is the minimum representable integer value then we can use a compare,
            // the result is 1 iff the dividend equals divisor.
            divMod->SetOper(GT_EQ);
            return node;
        }
    }

    size_t absDivisorValue =
        (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));

    if (!isPow2(absDivisorValue))
    {
        if (comp->opts.MinOpts())
        {
            return nullptr;
        }

#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        ssize_t magic;
        int     shift;

        if (type == TYP_INT)
        {
            magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift);
        }
        else
        {
#ifdef TARGET_64BIT
            magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift);
#else  // !TARGET_64BIT
            unreached();
#endif // !TARGET_64BIT
        }

        divisor->AsIntConCommon()->SetIconValue(magic);

        // Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node.
        // The existing node will later be transformed into a GT_ADD/GT_SUB that
        // computes the final result. This way we don't need to find and change the
        // use of the existing node.
        GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend);
        BlockRange().InsertBefore(divMod, mulhi);

        // mulhi was the easy part. Now we need to generate different code depending
        // on the divisor value:
        // For 3 we need:
        //     div = signbit(mulhi) + mulhi
        // For 5 we need:
        //     div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
        // For 7 we need:
        //     mulhi += dividend                    ; requires add adjust
        //     div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust
        // For -3 we need:
        //     mulhi -= dividend                    ; requires sub adjust
        //     div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
        bool requiresAddSubAdjust     = signum(divisorValue) != signum(magic);
        bool requiresShiftAdjust      = shift != 0;
        bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv;

        if (requiresDividendMultiuse)
        {
            LIR::Use dividendUse(BlockRange(), &mulhi->AsOp()->gtOp2, mulhi);
            dividend = ReplaceWithLclVar(dividendUse);
        }

        GenTree* adjusted;

        if (requiresAddSubAdjust)
        {
            dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
            adjusted = comp->gtNewOperNode(divisorValue > 0 ?
                                               GT_ADD : GT_SUB, type, mulhi, dividend);
            BlockRange().InsertBefore(divMod, dividend, adjusted);
        }
        else
        {
            adjusted = mulhi;
        }

        GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type);
        GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy);
        BlockRange().InsertBefore(divMod, shiftBy, signBit);

        LIR::Use adjustedUse(BlockRange(), &signBit->AsOp()->gtOp1, signBit);
        adjusted = ReplaceWithLclVar(adjustedUse);
        adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet());
        BlockRange().InsertBefore(divMod, adjusted);

        if (requiresShiftAdjust)
        {
            shiftBy  = comp->gtNewIconNode(shift, TYP_INT);
            adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy);
            BlockRange().InsertBefore(divMod, shiftBy, adjusted);
        }

        if (isDiv)
        {
            divMod->SetOperRaw(GT_ADD);
            divMod->AsOp()->gtOp1 = adjusted;
            divMod->AsOp()->gtOp2 = signBit;
        }
        else
        {
            GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit);

            dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());

            // dividend % divisor = dividend - divisor x div
            GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
            GenTree* mul     = comp->gtNewOperNode(GT_MUL, type, div, divisor);
            BlockRange().InsertBefore(divMod, dividend, div, divisor, mul);

            divMod->SetOperRaw(GT_SUB);
            divMod->AsOp()->gtOp1 = dividend;
            divMod->AsOp()->gtOp2 = mul;
        }

        return mulhi;
#elif defined(TARGET_ARM)
        // Currently there's no GT_MULHI for ARM32
        return nullptr;
#else
#error Unsupported or unset target architecture
#endif
    }

    // We're committed to the conversion now. Go find the use if any.
    LIR::Use use;
    if (!BlockRange().TryGetUse(node, &use))
    {
        return nullptr;
    }

    // We need to use the dividend node multiple times so its value needs to be
    // computed once and stored in a temp variable.
    LIR::Use opDividend(BlockRange(), &divMod->AsOp()->gtOp1, divMod);
    dividend = ReplaceWithLclVar(opDividend);

    GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63));

    if (absDivisorValue == 2)
    {
        // If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1.
        // We can get the same result by using GT_RSZ instead of GT_RSH.
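        // Worked example (illustrative) for "x / 4" with TYP_INT:
        //     bias = (x >> 31) & 3     ; 0 when x >= 0, 3 when x < 0
        //     quot = (x + bias) >> 2   ; arithmetic shift then truncates toward zero
        // For +/-2 the bias is just the sign bit, i.e. (unsigned)x >> 31, hence GT_RSZ.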
        adjustment->SetOper(GT_RSZ);
    }
    else
    {
        adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type));
    }

    GenTree* adjustedDividend =
        comp->gtNewOperNode(GT_ADD, type, adjustment,
                            comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()));

    GenTree* newDivMod;

    if (isDiv)
    {
        // perform the division by right shifting the adjusted dividend
        divisor->AsIntCon()->SetIconValue(genLog2(absDivisorValue));

        newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor);
        ContainCheckShiftRotate(newDivMod->AsOp());

        if (divisorValue < 0)
        {
            // negate the result if the divisor is negative
            newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod);
            ContainCheckNode(newDivMod);
        }
    }
    else
    {
        // dividend % divisor = dividend - divisor x (dividend / divisor)
        // divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor)
        // which simply discards the low log2(divisor) bits, that's just dividend & ~(divisor - 1)
        divisor->AsIntCon()->SetIconValue(~(absDivisorValue - 1));

        newDivMod = comp->gtNewOperNode(GT_SUB, type,
                                        comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()),
                                        comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor));
    }

    // Remove the divisor and dividend nodes from the linear order,
    // since we have reused them and will resequence the tree
    BlockRange().Remove(divisor);
    BlockRange().Remove(dividend);

    // linearize and insert the new tree before the original divMod node
    InsertTreeBeforeAndContainCheck(divMod, newDivMod);
    BlockRange().Remove(divMod);

    // replace the original divmod node with the new divmod tree
    use.ReplaceWith(newDivMod);

    return newDivMod->gtNext;
}

//------------------------------------------------------------------------
// LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a constant
// divisor into equivalent but faster sequences.
//
// Arguments:
//    node - the DIV or MOD node
//
// Returns:
//    The next node to lower.
//
GenTree* Lowering::LowerSignedDivOrMod(GenTree* node)
{
    assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));

    GenTree* next = node->gtNext;

    if (varTypeIsIntegral(node->TypeGet()))
    {
        // LowerConstIntDivOrMod will return nullptr if it doesn't transform the node.
        GenTree* newNode = LowerConstIntDivOrMod(node);
        if (newNode != nullptr)
        {
            return newNode;
        }
    }
    ContainCheckDivOrMod(node->AsOp());

    return next;
}

//------------------------------------------------------------------------
// LowerShift: Lower shift nodes
//
// Arguments:
//    shift - the shift node (GT_LSH, GT_RSH or GT_RSZ)
//
// Notes:
//    Remove unnecessary shift count masking; xarch shift instructions
//    mask the shift count to 5 bits (or 6 bits for 64-bit operations).
void Lowering::LowerShift(GenTreeOp* shift)
{
    assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ));

    size_t mask = 0x1f;

#ifdef TARGET_64BIT
    if (varTypeIsLong(shift->TypeGet()))
    {
        mask = 0x3f;
    }
#else
    assert(!varTypeIsLong(shift->TypeGet()));
#endif

    for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1())
    {
        GenTree* maskOp = andOp->gtGetOp2();

        if (!maskOp->IsCnsIntOrI())
        {
            break;
        }

        if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask)
        {
            break;
        }

        shift->gtOp2 = andOp->gtGetOp1();
        BlockRange().Remove(andOp);
        BlockRange().Remove(maskOp);
        // The parent was replaced, clear the contained and reg-optional flags.
        shift->gtOp2->ClearContained();
    }

    ContainCheckShiftRotate(shift);

#ifdef TARGET_ARM64
    // Try to recognize ubfiz/sbfiz idiom in LSH(CAST(X), CNS) tree
    if (comp->opts.OptimizationEnabled() && shift->OperIs(GT_LSH) && shift->gtGetOp1()->OperIs(GT_CAST) &&
        shift->gtGetOp2()->IsCnsIntOrI() && !shift->isContained())
    {
        GenTreeIntCon* cns  = shift->gtGetOp2()->AsIntCon();
        GenTreeCast*   cast = shift->gtGetOp1()->AsCast();

        if (!cast->isContained() && !cast->IsRegOptional() && !cast->gtOverflow() &&
            // Smaller CastOp is most likely an IND(X) node which is lowered to a zero-extend load
            cast->CastOp()->TypeIs(TYP_LONG, TYP_INT))
        {
            // Cast is either "TYP_LONG <- TYP_INT" or "TYP_INT <- %SMALL_INT% <- TYP_INT" (signed or unsigned)
            unsigned dstBits = genTypeSize(cast) * BITS_PER_BYTE;
            unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE
                                                                  : genTypeSize(cast->CastOp()) * BITS_PER_BYTE;
            assert(!cast->CastOp()->isContained());

            // It has to be an upcast and CNS must be in [1..srcBits) range
            if ((srcBits < dstBits) && (cns->IconValue() > 0) && (cns->IconValue() < srcBits))
            {
                JITDUMP("Recognized ubfiz/sbfiz pattern in LSH(CAST, CNS). Changing op to GT_BFIZ");
                shift->ChangeOper(GT_BFIZ);
                MakeSrcContained(shift, cast);
            }
        }
    }
#endif
}

void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node)
{
#ifdef FEATURE_SIMD
    if (node->TypeGet() == TYP_SIMD12)
    {
        // Assumption 1:
        // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
        // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
        // reading and writing purposes.
        //
        // Assumption 2:
        // RyuJit backend is making another implicit assumption that when Vector3 type args are passed in
        // registers or on the stack, the uppermost 4 bytes will be zero.
        //
        // For P/Invoke return and Reverse P/Invoke argument passing, the native compiler doesn't guarantee
        // that the upper 4 bytes of a Vector3 type struct are zero initialized and hence assumption 2 is
        // invalid.
        //
        // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
        // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
        // passes it as the retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
        // there is no need to clear the upper 4 bytes of Vector3 type args.
        //
        // RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
        // Vector3 return values are returned in two return registers and Caller assembles them into a
        // single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4 bytes of Vector3
        // type args in the prolog and of the Vector3 type return value of a call.
        //
        // RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments
        // are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed
        // as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear
        // it either.

        LclVarDsc* varDsc = comp->lvaGetDesc(node->AsLclVarCommon());

        if (comp->lvaMapSimd12ToSimd16(varDsc))
        {
            JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n");
            DISPNODE(node);
            JITDUMP("============");

            node->gtType = TYP_SIMD16;
        }
    }
#endif // FEATURE_SIMD
}

//------------------------------------------------------------------------
// LowerArrElem: Lower a GT_ARR_ELEM node
//
// Arguments:
//    node - the GT_ARR_ELEM node to lower.
//
// Return Value:
//    The next node to lower.
// // Assumptions: // pTree points to a pointer to a GT_ARR_ELEM node. // // Notes: // This performs the following lowering. We start with a node of the form: // /--* <arrObj> // +--* <index0> // +--* <index1> // /--* arrMD&[,] // // First, we create temps for arrObj if it is not already a lclVar, and for any of the index // expressions that have side-effects. // We then transform the tree into: // <offset is null - no accumulated offset for the first index> // /--* <arrObj> // +--* <index0> // /--* ArrIndex[i, ] // +--* <arrObj> // /--| arrOffs[i, ] // | +--* <arrObj> // | +--* <index1> // +--* ArrIndex[*,j] // +--* <arrObj> // /--| arrOffs[*,j] // +--* lclVar NewTemp // /--* lea (scale = element size, offset = offset of first element) // // The new stmtExpr may be omitted if the <arrObj> is a lclVar. // The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for // the statement containing the original arrMD. // Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second // reference to NewTemp), because that provides more accurate lifetimes. // There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively. // GenTree* Lowering::LowerArrElem(GenTree* node) { // This will assert if we don't have an ArrElem node GenTreeArrElem* arrElem = node->AsArrElem(); const unsigned char rank = arrElem->gtArrRank; JITDUMP("Lowering ArrElem\n"); JITDUMP("============\n"); DISPTREERANGE(BlockRange(), arrElem); JITDUMP("\n"); assert(arrElem->gtArrObj->TypeGet() == TYP_REF); // We need to have the array object in a lclVar. if (!arrElem->gtArrObj->IsLocal()) { LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem); ReplaceWithLclVar(arrObjUse); } GenTree* arrObjNode = arrElem->gtArrObj; assert(arrObjNode->IsLocal()); GenTree* insertionPoint = arrElem; // The first ArrOffs node will have 0 for the offset of the previous dimension. GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0); BlockRange().InsertBefore(insertionPoint, prevArrOffs); GenTree* nextToLower = prevArrOffs; for (unsigned char dim = 0; dim < rank; dim++) { GenTree* indexNode = arrElem->gtArrInds[dim]; // Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones. GenTree* idxArrObjNode; if (dim == 0) { idxArrObjNode = arrObjNode; } else { idxArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, idxArrObjNode); } // Next comes the GT_ARR_INDEX node. GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX) GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElemType); arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrMDIdx); GenTree* offsArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, offsArrObjNode); GenTreeArrOffs* arrOffs = new (comp, GT_ARR_OFFSET) GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank, arrElem->gtArrElemType); arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrOffs); prevArrOffs = arrOffs; } // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the // base. 
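    // e.g. (illustrative) for a rank-2 "int[,]" access the result is
    //     LEA(base = arrObj, index = totalOffset, scale = 4, offset = <MD array data offset>)
    // where totalOffset is the GT_ARR_OFFSET chain accumulated above and the offset
    // comes from eeGetMDArrayDataOffset below.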
unsigned scale = arrElem->gtArrElemSize; unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrRank); GenTree* leaIndexNode = prevArrOffs; if (!jitIsScaleIndexMul(scale)) { // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are // TYP_INT GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale); GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode); BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode); leaIndexNode = mulNode; scale = 1; } GenTree* leaBase = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, leaBase); GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset); BlockRange().InsertBefore(insertionPoint, leaNode); LIR::Use arrElemUse; if (BlockRange().TryGetUse(arrElem, &arrElemUse)) { arrElemUse.ReplaceWith(leaNode); } else { leaNode->SetUnusedValue(); } BlockRange().Remove(arrElem); JITDUMP("Results of lowering ArrElem:\n"); DISPTREERANGE(BlockRange(), leaNode); JITDUMP("\n\n"); return nextToLower; } PhaseStatus Lowering::DoPhase() { // If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the // appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke // data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodProlog(); } #if !defined(TARGET_64BIT) DecomposeLongs decomp(comp); // Initialize the long decomposition class. if (comp->compLongUsed) { decomp.PrepareForDecomposition(); } #endif // !defined(TARGET_64BIT) if (!comp->compEnregLocals()) { // Lowering is checking if lvDoNotEnregister is already set for contained optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // `lvDoNotEnregister` flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. comp->lvSetMinOptsDoNotEnreg(); } for (BasicBlock* const block : comp->Blocks()) { /* Make the block publicly available */ comp->compCurBB = block; #if !defined(TARGET_64BIT) if (comp->compLongUsed) { decomp.DecomposeBlock(block); } #endif //! TARGET_64BIT LowerBlock(block); } #ifdef DEBUG JITDUMP("Lower has completed modifying nodes.\n"); if (VERBOSE) { comp->fgDispBasicBlocks(true); } #endif // Recompute local var ref counts before potentially sorting for liveness. // Note this does minimal work in cases where we are not going to sort. const bool isRecompute = true; const bool setSlotNumbers = false; comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); comp->fgLocalVarLiveness(); // local var liveness can delete code, which may create empty blocks if (comp->opts.OptimizationEnabled()) { comp->optLoopsMarked = false; bool modified = comp->fgUpdateFlowGraph(); if (modified) { JITDUMP("had to run another liveness pass:\n"); comp->fgLocalVarLiveness(); } } // Recompute local var ref counts again after liveness to reflect // impact of any dead code removal. Note this may leave us with // tracked vars that have zero refs. 
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); return PhaseStatus::MODIFIED_EVERYTHING; } #ifdef DEBUG //------------------------------------------------------------------------ // Lowering::CheckCallArg: check that a call argument is in an expected // form after lowering. // // Arguments: // arg - the argument to check. // void Lowering::CheckCallArg(GenTree* arg) { if (!arg->IsValue() && !arg->OperIsPutArgStk()) { assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() || arg->OperIsCopyBlkOp()); return; } switch (arg->OperGet()) { case GT_FIELD_LIST: { GenTreeFieldList* list = arg->AsFieldList(); assert(list->isContained()); for (GenTreeFieldList::Use& use : list->Uses()) { assert(use.GetNode()->OperIsPutArg()); } } break; default: assert(arg->OperIsPutArg()); break; } } //------------------------------------------------------------------------ // Lowering::CheckCall: check that a call is in an expected form after // lowering. Currently this amounts to checking its // arguments, but could be expanded to verify more // properties in the future. // // Arguments: // call - the call to check. // void Lowering::CheckCall(GenTreeCall* call) { if (call->gtCallThisArg != nullptr) { CheckCallArg(call->gtCallThisArg->GetNode()); } for (GenTreeCall::Use& use : call->Args()) { CheckCallArg(use.GetNode()); } for (GenTreeCall::Use& use : call->LateArgs()) { CheckCallArg(use.GetNode()); } } //------------------------------------------------------------------------ // Lowering::CheckNode: check that an LIR node is in an expected form // after lowering. // // Arguments: // compiler - the compiler context. // node - the node to check. // void Lowering::CheckNode(Compiler* compiler, GenTree* node) { switch (node->OperGet()) { case GT_CALL: CheckCall(node->AsCall()); break; #ifdef FEATURE_SIMD case GT_SIMD: case GT_HWINTRINSIC: assert(node->TypeGet() != TYP_SIMD12); break; #endif // FEATURE_SIMD case GT_LCL_VAR: case GT_STORE_LCL_VAR: { const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar()); #if defined(FEATURE_SIMD) && defined(TARGET_64BIT) if (node->TypeIs(TYP_SIMD12)) { assert(compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc) || (varDsc->lvSize() == 12)); } #endif // FEATURE_SIMD && TARGET_64BIT if (varDsc->lvPromoted) { assert(varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegRet); } } break; case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: { const GenTreeLclVarCommon* lclVarAddr = node->AsLclVarCommon(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarAddr); if (((lclVarAddr->gtFlags & GTF_VAR_DEF) != 0) && varDsc->HasGCPtr()) { // Emitter does not correctly handle live updates for LCL_VAR_ADDR // when they are not contained, for example, `STOREIND byref(GT_LCL_VAR_ADDR not-contained)` // would generate: // add r1, sp, 48 // r1 contains address of a lclVar V01. // str r0, [r1] // a gc ref becomes live in V01, but emitter would not report it. // Make sure that we use uncontained address nodes only for variables // that will be marked as mustInit and will be alive throughout the whole block even when tracked. assert(lclVarAddr->isContained() || !varDsc->lvTracked || varTypeIsStruct(varDsc)); // TODO: support this assert for uses, see https://github.com/dotnet/runtime/issues/51900. 
} assert(varDsc->lvDoNotEnregister); break; } case GT_PHI: case GT_PHI_ARG: assert(!"Should not see phi nodes after rationalize"); break; case GT_LCL_FLD: case GT_STORE_LCL_FLD: { const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclFld()); assert(varDsc->lvDoNotEnregister); } break; default: break; } } //------------------------------------------------------------------------ // Lowering::CheckBlock: check that the contents of an LIR block are in an // expected form after lowering. // // Arguments: // compiler - the compiler context. // block - the block to check. // bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block) { assert(block->isEmpty() || block->IsLIR()); LIR::Range& blockRange = LIR::AsRange(block); for (GenTree* node : blockRange) { CheckNode(compiler, node); } assert(blockRange.CheckLIR(compiler, true)); return true; } #endif //------------------------------------------------------------------------ // Lowering::LowerBlock: Lower all the nodes in a BasicBlock // // Arguments: // block - the block to lower. // void Lowering::LowerBlock(BasicBlock* block) { assert(block == comp->compCurBB); // compCurBB must already be set. assert(block->isEmpty() || block->IsLIR()); m_block = block; // NOTE: some of the lowering methods insert calls before the node being // lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In // general, any code that is inserted before the current node should be // "pre-lowered" as they won't be subject to further processing. // Lowering::CheckBlock() runs some extra checks on call arguments in // order to help catch unlowered nodes. GenTree* node = BlockRange().FirstNode(); while (node != nullptr) { node = LowerNode(node); } assert(CheckBlock(comp, block)); } /** Verifies if both of these trees represent the same indirection. * Used by Lower to annotate if CodeGen generate an instruction of the * form *addrMode BinOp= expr * * Preconditions: both trees are children of GT_INDs and their underlying children * have the same gtOper. * * This is a first iteration to actually recognize trees that can be code-generated * as a single read-modify-write instruction on AMD64/x86. For now * this method only supports the recognition of simple addressing modes (through GT_LEA) * or local var indirections. Local fields, array access and other more complex nodes are * not yet supported. * * TODO-CQ: Perform tree recognition by using the Value Numbering Package, that way we can recognize * arbitrary complex trees and support much more addressing patterns. */ bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd) { assert(candidate->OperGet() == GT_IND); assert(storeInd->OperGet() == GT_STOREIND); // We should check the size of the indirections. If they are // different, say because of a cast, then we can't call them equivalent. Doing so could cause us // to drop a cast. // Signed-ness difference is okay and expected since a store indirection must always // be signed based on the CIL spec, but a load could be unsigned. if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType)) { return false; } GenTree* pTreeA = candidate->gtGetOp1(); GenTree* pTreeB = storeInd->gtGetOp1(); // This method will be called by codegen (as well as during lowering). // After register allocation, the sources may have been spilled and reloaded // to a different register, indicated by an inserted GT_RELOAD node. 
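    // e.g. IND(RELOAD(LCL_VAR V01)) and IND(LCL_VAR V01) should still compare equal,
    // so strip any RELOAD/COPY wrappers before comparing the underlying address trees.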
pTreeA = pTreeA->gtSkipReloadOrCopy(); pTreeB = pTreeB->gtSkipReloadOrCopy(); genTreeOps oper; if (pTreeA->OperGet() != pTreeB->OperGet()) { return false; } oper = pTreeA->OperGet(); switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_CLS_VAR_ADDR: case GT_CNS_INT: return NodesAreEquivalentLeaves(pTreeA, pTreeB); case GT_LEA: { GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode(); GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode(); return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) && NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) && (gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset()); } default: // We don't handle anything that is not either a constant, // a local var or LEA. return false; } } //------------------------------------------------------------------------ // NodesAreEquivalentLeaves: Check whether the two given nodes are the same leaves. // // Arguments: // tree1 and tree2 are nodes to be checked. // Return Value: // Returns true if they are same leaves, false otherwise. // // static bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2) { if (tree1 == tree2) { return true; } if (tree1 == nullptr || tree2 == nullptr) { return false; } tree1 = tree1->gtSkipReloadOrCopy(); tree2 = tree2->gtSkipReloadOrCopy(); if (tree1->TypeGet() != tree2->TypeGet()) { return false; } if (tree1->OperGet() != tree2->OperGet()) { return false; } if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf()) { return false; } switch (tree1->OperGet()) { case GT_CNS_INT: return tree1->AsIntCon()->IconValue() == tree2->AsIntCon()->IconValue() && tree1->IsIconHandle() == tree2->IsIconHandle(); case GT_LCL_VAR: case GT_LCL_VAR_ADDR: return tree1->AsLclVarCommon()->GetLclNum() == tree2->AsLclVarCommon()->GetLclNum(); case GT_CLS_VAR_ADDR: return tree1->AsClsVar()->gtClsVarHnd == tree2->AsClsVar()->gtClsVarHnd; default: return false; } } //------------------------------------------------------------------------ // Lowering::CheckMultiRegLclVar: Check whether a MultiReg GT_LCL_VAR node can // remain a multi-reg. // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR node. // retTypeDesc - a return type descriptor either for a call source of a store of // the local, or for the GT_RETURN consumer of the local. // // Notes: // If retTypeDesc is non-null, this method will check that the fields are compatible. // Otherwise, it will only check that the lclVar is independently promoted // (i.e. it is marked lvPromoted and not lvDoNotEnregister). // bool Lowering::CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc) { bool canEnregister = false; #if FEATURE_MULTIREG_RET LclVarDsc* varDsc = comp->lvaGetDesc(lclNode->GetLclNum()); if ((comp->lvaEnregMultiRegVars) && varDsc->lvPromoted) { // We can enregister if we have a promoted struct and all the fields' types match the ABI requirements. // Note that we don't promote structs with explicit layout, so we don't need to check field offsets, and // if we have multiple types packed into a single register, we won't have matching reg and field counts, // so we can tolerate mismatches of integer size. if (varDsc->lvPromoted && (comp->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // If we have no retTypeDesc, we only care that it is independently promoted. 
if (retTypeDesc == nullptr) { canEnregister = true; } else { unsigned regCount = retTypeDesc->GetReturnRegCount(); if (regCount == varDsc->lvFieldCnt) { canEnregister = true; } } } } #ifdef TARGET_XARCH // For local stores on XARCH we only handle mismatched src/dest register count for // calls of SIMD type. If the source was another lclVar similarly promoted, we would // have broken it into multiple stores. if (lclNode->OperIs(GT_STORE_LCL_VAR) && !lclNode->gtGetOp1()->OperIs(GT_CALL)) { canEnregister = false; } #endif // TARGET_XARCH if (canEnregister) { lclNode->SetMultiReg(); } else { lclNode->ClearMultiReg(); if (varDsc->lvPromoted && !varDsc->lvDoNotEnregister) { comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp)); } } #endif return canEnregister; } //------------------------------------------------------------------------ // Containment Analysis //------------------------------------------------------------------------ void Lowering::ContainCheckNode(GenTree* node) { switch (node->gtOper) { case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: ContainCheckStoreLoc(node->AsLclVarCommon()); break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT: case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP: case GT_JCMP: ContainCheckCompare(node->AsOp()); break; case GT_JTRUE: ContainCheckJTrue(node->AsOp()); break; case GT_ADD: case GT_SUB: #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: #endif case GT_AND: case GT_OR: case GT_XOR: ContainCheckBinary(node->AsOp()); break; #if defined(TARGET_X86) case GT_MUL_LONG: #endif case GT_MUL: case GT_MULHI: ContainCheckMul(node->AsOp()); break; case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD: ContainCheckDivOrMod(node->AsOp()); break; case GT_LSH: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR: #ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: #endif ContainCheckShiftRotate(node->AsOp()); break; case GT_ARR_OFFSET: ContainCheckArrOffset(node->AsArrOffs()); break; case GT_LCLHEAP: ContainCheckLclHeap(node->AsOp()); break; case GT_RETURN: ContainCheckRet(node->AsOp()); break; case GT_RETURNTRAP: ContainCheckReturnTrap(node->AsOp()); break; case GT_STOREIND: ContainCheckStoreIndir(node->AsStoreInd()); break; case GT_IND: ContainCheckIndir(node->AsIndir()); break; case GT_PUTARG_REG: case GT_PUTARG_STK: #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT // The regNum must have been set by the lowering of the call. assert(node->GetRegNum() != REG_NA); break; #ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; #endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: ContainCheckSIMD(node->AsSIMD()); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: ContainCheckHWIntrinsic(node->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS default: break; } } //------------------------------------------------------------------------ // ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained. 
// // Arguments: // node - pointer to the GT_RETURNTRAP node // void Lowering::ContainCheckReturnTrap(GenTreeOp* node) { #ifdef TARGET_XARCH assert(node->OperIs(GT_RETURNTRAP)); // This just turns into a compare of its child with an int + a conditional call if (node->gtOp1->isIndir()) { MakeSrcContained(node, node->gtOp1); } #endif // TARGET_XARCH } //------------------------------------------------------------------------ // ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained. // // Arguments: // node - pointer to the GT_ARR_OFFSET node // void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node) { assert(node->OperIs(GT_ARR_OFFSET)); // we don't want to generate code for this if (node->gtOffset->IsIntegralConst(0)) { MakeSrcContained(node, node->AsArrOffs()->gtOffset); } } //------------------------------------------------------------------------ // ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckLclHeap(GenTreeOp* node) { assert(node->OperIs(GT_LCLHEAP)); GenTree* size = node->AsOp()->gtOp1; if (size->IsCnsIntOrI()) { MakeSrcContained(node, size); } } //------------------------------------------------------------------------ // ContainCheckRet: determine whether the source of a node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckRet(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); #if !defined(TARGET_64BIT) if (ret->TypeGet() == TYP_LONG) { GenTree* op1 = ret->gtGetOp1(); noway_assert(op1->OperGet() == GT_LONG); MakeSrcContained(ret, op1); } #endif // !defined(TARGET_64BIT) #if FEATURE_MULTIREG_RET if (ret->TypeIs(TYP_STRUCT)) { GenTree* op1 = ret->gtGetOp1(); // op1 must be either a lclvar or a multi-reg returning call if (op1->OperGet() == GT_LCL_VAR) { const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVarCommon()); // This must be a multi-reg return or an HFA of a single element. assert(varDsc->lvIsMultiRegRet || (varDsc->lvIsHfa() && varTypeIsValidHfaType(varDsc->lvType))); // Mark var as contained if not enregisterable. if (!varDsc->IsEnregisterableLcl()) { if (!op1->IsMultiRegLclVar()) { MakeSrcContained(ret, op1); } } } } #endif // FEATURE_MULTIREG_RET } //------------------------------------------------------------------------ // ContainCheckJTrue: determine whether the source of a JTRUE should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckJTrue(GenTreeOp* node) { // The compare does not need to be generated into a register. GenTree* cmp = node->gtGetOp1(); cmp->gtType = TYP_VOID; cmp->gtFlags |= GTF_SET_FLAGS; } //------------------------------------------------------------------------ // ContainCheckBitCast: determine whether the source of a BITCAST should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckBitCast(GenTree* node) { GenTree* const op1 = node->AsOp()->gtOp1; if (op1->isMemoryOp()) { op1->SetContained(); } else if (op1->OperIs(GT_LCL_VAR)) { if (!m_lsra->willEnregisterLocalVars()) { op1->SetContained(); } const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVar()); // TODO-Cleanup: we want to check if the local is already known not // to be on reg, for example, because local enreg is disabled. 
        if (varDsc->lvDoNotEnregister)
        {
            op1->SetContained();
        }
        else
        {
            op1->SetRegOptional();
        }
    }
    else if (op1->IsLocal())
    {
        op1->SetContained();
    }
}

//------------------------------------------------------------------------
// LowerStoreIndirCommon: a common logic to lower StoreIndir.
//
// Arguments:
//    ind - the store indirection node we are lowering.
//
void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind)
{
    assert(ind->TypeGet() != TYP_STRUCT);

#if defined(TARGET_ARM64)
    // Verify containment safety before creating an LEA that must be contained.
    //
    const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
    const bool isContainable = true;
#endif
    TryCreateAddrMode(ind->Addr(), isContainable, ind);

    if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(ind))
    {
        if (varTypeIsFloating(ind) && ind->Data()->IsCnsFltOrDbl())
        {
            // Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller.
            GenTree*  data   = ind->Data();
            double    dblCns = data->AsDblCon()->gtDconVal;
            ssize_t   intCns = 0;
            var_types type   = TYP_UNKNOWN;

            // XARCH: we can always contain the immediates.
            // ARM64: zero can always be contained, other cases will use immediates from the data
            //        section and it is not a clear win to switch them to inline integers.
            // ARM:   FP constants are assembled from integral ones, so it is always profitable
            //        to directly use the integers as it avoids the int -> float conversion.
            CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_XARCH) || defined(TARGET_ARM)
            bool shouldSwitchToInteger = true;
#else // TARGET_ARM64
            bool shouldSwitchToInteger = !data->IsCnsNonZeroFltOrDbl();
#endif

            if (shouldSwitchToInteger)
            {
                if (ind->TypeIs(TYP_FLOAT))
                {
                    float fltCns = static_cast<float>(dblCns); // should be a safe round-trip
                    intCns       = static_cast<ssize_t>(*reinterpret_cast<INT32*>(&fltCns));
                    type         = TYP_INT;
                }
#ifdef TARGET_64BIT
                else
                {
                    assert(ind->TypeIs(TYP_DOUBLE));
                    intCns = static_cast<ssize_t>(*reinterpret_cast<INT64*>(&dblCns));
                    type   = TYP_LONG;
                }
#endif
            }

            if (type != TYP_UNKNOWN)
            {
                data->BashToConst(intCns, type);
                ind->ChangeType(type);
            }
        }

        LowerStoreIndir(ind);
    }
}

//------------------------------------------------------------------------
// LowerIndir: a common logic to lower IND load or NullCheck.
//
// Arguments:
//    ind - the ind node we are lowering.
//
void Lowering::LowerIndir(GenTreeIndir* ind)
{
    assert(ind->OperIs(GT_IND, GT_NULLCHECK));
    // Process struct typed indirs separately unless they are unused;
    // they only appear as the source of a block copy operation or a return node.
    if (!ind->TypeIs(TYP_STRUCT) || ind->IsUnusedValue())
    {
        // TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects
        // address containment in some cases so we end up creating trivial (reg + offset)
        // or (reg + reg) LEAs that are not necessary.
        CLANG_FORMAT_COMMENT_ANCHOR;

#if defined(TARGET_ARM64)
        // Verify containment safety before creating an LEA that must be contained.
        //
        const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
        const bool isContainable = true;
#endif

        TryCreateAddrMode(ind->Addr(), isContainable, ind);
        ContainCheckIndir(ind);

        if (ind->OperIs(GT_NULLCHECK) || ind->IsUnusedValue())
        {
            TransformUnusedIndirection(ind, comp, m_block);
        }
    }
    else
    {
        // If the `ADDR` node under `STORE_OBJ(dstAddr, IND(struct(ADDR)))`
        // is a complex one it could benefit from an `LEA` that is not contained.
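        // For example (illustrative), an address like ADD(lclVar, LSH(index, 4)) feeding a
        // block copy is cheaper computed once into a register via an uncontained LEA than
        // re-materialized for each load/store pair the copy expands into.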
        const bool isContainable = false;
        TryCreateAddrMode(ind->Addr(), isContainable, ind);
    }
}

//------------------------------------------------------------------------
// TransformUnusedIndirection: change the opcode and the type of the unused indirection.
//
// Arguments:
//    ind   - Indirection to transform.
//    comp  - Compiler instance.
//    block - Basic block of the indirection.
//
void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block)
{
    // A nullcheck is essentially the same as an indirection with no use.
    // The difference lies in whether a target register must be allocated.
    // On XARCH we can generate a compare with no target register as long as the address
    // is not contained.
    // On ARM64 we can generate a load to REG_ZR in all cases.
    // However, on ARM we must always generate a load to a register.
    // In the case where we require a target register, it is better to use GT_IND, since
    // GT_NULLCHECK is a non-value node and would therefore require an internal register
    // to use as the target. That is non-optimal because it will be modeled as conflicting
    // with the source register(s).
    // So, to summarize:
    // - On ARM64, always use GT_NULLCHECK for a dead indirection.
    // - On ARM, always use GT_IND.
    // - On XARCH, use GT_IND if we have a contained address, and GT_NULLCHECK otherwise.
    // In all cases we try to preserve the original type and never make it wider to avoid AVEs.
    // For structs we conservatively lower it to BYTE. For 8-byte primitives we lower it to TYP_INT
    // on XARCH as an optimization.
    //
    assert(ind->OperIs(GT_NULLCHECK, GT_IND, GT_BLK, GT_OBJ));

    ind->ChangeType(comp->gtTypeForNullCheck(ind));

#ifdef TARGET_ARM64
    bool useNullCheck = true;
#elif TARGET_ARM
    bool useNullCheck = false;
#else  // TARGET_XARCH
    bool useNullCheck = !ind->Addr()->isContained();
#endif // !TARGET_XARCH

    if (useNullCheck && !ind->OperIs(GT_NULLCHECK))
    {
        comp->gtChangeOperToNullCheck(ind, block);
        ind->ClearUnusedValue();
    }
    else if (!useNullCheck && !ind->OperIs(GT_IND))
    {
        ind->ChangeOper(GT_IND);
        ind->SetUnusedValue();
    }
}

//------------------------------------------------------------------------
// LowerBlockStoreCommon: a common logic to lower STORE_OBJ/BLK/DYN_BLK.
//
// Arguments:
//    blkNode - the store blk/obj node we are lowering.
//
void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode)
{
    assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ));

    // Lose the type information stored in the source - we no longer need it.
    if (blkNode->Data()->OperIs(GT_OBJ, GT_BLK))
    {
        blkNode->Data()->SetOper(GT_IND);
        LowerIndir(blkNode->Data()->AsIndir());
    }

    if (TryTransformStoreObjAsStoreInd(blkNode))
    {
        return;
    }

    LowerBlockStore(blkNode);
}

//------------------------------------------------------------------------
// TryTransformStoreObjAsStoreInd: try to replace STORE_OBJ/BLK as STOREIND.
//
// Arguments:
//    blkNode - the store node.
//
// Return value:
//    true if the replacement was made, false otherwise.
//
// Notes:
//    TODO-CQ: this method should do the transformation when possible
//    and STOREIND should always generate better or the same code as
//    STORE_OBJ/BLK for the same copy.
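//
//    For illustration (a hypothetical layout, not from the VM): a copy of a
//    struct S { int a; } has a register type of TYP_INT, so
//    STORE_OBJ<S>(dstAddr, src) can be re-expressed as STOREIND<int>(dstAddr, src)
//    and then lowered by the usual store-indirection path.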
//
bool Lowering::TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode)
{
    assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ));
    if (!comp->opts.OptimizationEnabled())
    {
        return false;
    }

    if (blkNode->OperIs(GT_STORE_DYN_BLK))
    {
        return false;
    }

    ClassLayout* layout = blkNode->GetLayout();
    if (layout == nullptr)
    {
        return false;
    }

    var_types regType = layout->GetRegisterType();
    if (regType == TYP_UNDEF)
    {
        return false;
    }

    GenTree* src = blkNode->Data();
    if (varTypeIsSIMD(regType) && src->IsConstInitVal())
    {
        // TODO-CQ: support STORE_IND SIMD16(SIMD16, CNT_INT 0).
        return false;
    }

    if (varTypeIsGC(regType))
    {
        // TODO-CQ: STOREIND does not try to contain src if we need a barrier,
        // STORE_OBJ generates better code currently.
        return false;
    }

    if (src->OperIsInitVal() && !src->IsConstInitVal())
    {
        return false;
    }

    if (varTypeIsSmall(regType) && !src->IsConstInitVal() && !src->IsLocal())
    {
        // source operand INDIR will use a widening instruction
        // and generate worse code, like `movzx` instead of `mov`
        // on x64.
        return false;
    }

    JITDUMP("Replacing STORE_OBJ with STOREIND for [%06u]\n", blkNode->gtTreeID);
    blkNode->ChangeOper(GT_STOREIND);
    blkNode->ChangeType(regType);

    if ((blkNode->gtFlags & GTF_IND_TGT_NOT_HEAP) == 0)
    {
        blkNode->gtFlags |= GTF_IND_TGTANYWHERE;
    }

    if (varTypeIsStruct(src))
    {
        src->ChangeType(regType);
        LowerNode(blkNode->Data());
    }
    else if (src->OperIsInitVal())
    {
        GenTreeUnOp* initVal = src->AsUnOp();
        src                  = src->gtGetOp1();
        assert(src->IsCnsIntOrI());
        src->AsIntCon()->FixupInitBlkValue(regType);
        blkNode->SetData(src);
        BlockRange().Remove(initVal);
    }
    else
    {
        assert(src->TypeIs(regType) || src->IsCnsIntOrI() || src->IsCall());
    }

    LowerStoreIndirCommon(blkNode->AsStoreInd());
    return true;
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                               Lower                                       XX
XX                                                                           XX
XX  Preconditions:                                                           XX
XX                                                                           XX
XX  Postconditions (for the nodes currently handled):                        XX
XX    - All operands requiring a register are explicit in the graph          XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif

#include "lower.h"

#if !defined(TARGET_64BIT)
#include "decomposelongs.h"
#endif // !defined(TARGET_64BIT)

//------------------------------------------------------------------------
// MakeSrcContained: Make "childNode" a contained node
//
// Arguments:
//    parentNode - is a non-leaf node that can contain its 'childNode'
//    childNode  - is an op that will now be contained by its parent.
//
// Notes:
//    If 'childNode' has any existing sources, they will now be sources for the parent.
//
void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode) const
{
    assert(!parentNode->OperIsLeaf());
    assert(childNode->canBeContained());

    childNode->SetContained();
    assert(childNode->isContained());

#ifdef DEBUG
    if (IsContainableMemoryOp(childNode))
    {
        // Verify caller of this method checked safety.
        //
        const bool isSafeToContainMem = IsSafeToContainMem(parentNode, childNode);

        if (!isSafeToContainMem)
        {
            JITDUMP("** Unsafe mem containment of [%06u] in [%06u]\n", comp->dspTreeID(childNode),
                    comp->dspTreeID(parentNode));
            assert(isSafeToContainMem);
        }
    }
#endif
}

//------------------------------------------------------------------------
// CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate
//    and, if so, makes it contained.
//
// Arguments:
//    parentNode - is any non-leaf node
//    childNode  - is a child op of 'parentNode'
//
// Return value:
//    true if we are able to make childNode a contained immediate
//
bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
{
    assert(!parentNode->OperIsLeaf());
    // If childNode is a containable immediate
    if (IsContainableImmed(parentNode, childNode))
    {
        // then make it contained within the parentNode
        MakeSrcContained(parentNode, childNode);
        return true;
    }
    return false;
}

//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and parentNode,
//    and returns 'true' iff memory operand childNode can be contained in parentNode.
//
// Arguments:
//    parentNode - any non-leaf node
//    childNode  - some node that is an input to `parentNode`
//
// Return value:
//    true if it is safe to make childNode a contained memory operand.
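//
// Notes:
//    A sketch of the check performed below: the side effects of childNode are
//    gathered once, and every node between childNode and parentNode in linear
//    order is then tested for interference; any conflicting intervening side
//    effect (e.g. a store that childNode's load could observe) makes the
//    containment unsafe.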
//
bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const
{
    // Quick early-out for unary cases
    //
    if (childNode->gtNext == parentNode)
    {
        return true;
    }

    m_scratchSideEffects.Clear();
    m_scratchSideEffects.AddNode(comp, childNode);

    for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext)
    {
        const bool strict = true;
        if (m_scratchSideEffects.InterferesWith(comp, node, strict))
        {
            return false;
        }
    }

    return true;
}

//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and grandParentNode
//    and returns 'true' iff memory operand childNode can be contained in ancestorNode
//
// Arguments:
//    grandParentNode - any non-leaf node
//    parentNode      - parent of `childNode` and an input to `grandParentNode`
//    childNode       - some node that is an input to `parentNode`
//
// Return value:
//    true if it is safe to make childNode a contained memory operand.
//
bool Lowering::IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const
{
    m_scratchSideEffects.Clear();
    m_scratchSideEffects.AddNode(comp, childNode);

    for (GenTree* node = childNode->gtNext; node != grandparentNode; node = node->gtNext)
    {
        if (node == parentNode)
        {
            continue;
        }

        const bool strict = true;
        if (m_scratchSideEffects.InterferesWith(comp, node, strict))
        {
            return false;
        }
    }

    return true;
}

//------------------------------------------------------------------------
// LowerNode: this is the main entry point for Lowering.
//
// Arguments:
//    node - the node we are lowering.
//
// Returns:
//    next node in the transformed node sequence that needs to be lowered.
//
GenTree* Lowering::LowerNode(GenTree* node)
{
    assert(node != nullptr);
    switch (node->gtOper)
    {
        case GT_NULLCHECK:
        case GT_IND:
            LowerIndir(node->AsIndir());
            break;

        case GT_STOREIND:
            LowerStoreIndirCommon(node->AsStoreInd());
            break;

        case GT_ADD:
        {
            GenTree* next = LowerAdd(node->AsOp());
            if (next != nullptr)
            {
                return next;
            }
        }
        break;

#if !defined(TARGET_64BIT)
        case GT_ADD_LO:
        case GT_ADD_HI:
        case GT_SUB_LO:
        case GT_SUB_HI:
#endif
        case GT_SUB:
        case GT_AND:
        case GT_OR:
        case GT_XOR:
            return LowerBinaryArithmetic(node->AsOp());

        case GT_MUL:
        case GT_MULHI:
#if defined(TARGET_X86) || defined(TARGET_ARM64)
        case GT_MUL_LONG:
#endif
            return LowerMul(node->AsOp());

        case GT_UDIV:
        case GT_UMOD:
            if (!LowerUnsignedDivOrMod(node->AsOp()))
            {
                ContainCheckDivOrMod(node->AsOp());
            }
            break;

        case GT_DIV:
        case GT_MOD:
            return LowerSignedDivOrMod(node);

        case GT_SWITCH:
            return LowerSwitch(node);

        case GT_CALL:
            LowerCall(node);
            break;

        case GT_LT:
        case GT_LE:
        case GT_GT:
        case GT_GE:
        case GT_EQ:
        case GT_NE:
        case GT_TEST_EQ:
        case GT_TEST_NE:
        case GT_CMP:
            return LowerCompare(node);

        case GT_JTRUE:
            return LowerJTrue(node->AsOp());

        case GT_JMP:
            LowerJmpMethod(node);
            break;

        case GT_RETURN:
            LowerRet(node->AsUnOp());
            break;

        case GT_RETURNTRAP:
            ContainCheckReturnTrap(node->AsOp());
            break;

        case GT_CAST:
            LowerCast(node);
            break;

#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
        case GT_BOUNDS_CHECK:
            ContainCheckBoundsChk(node->AsBoundsChk());
            break;
#endif // TARGET_XARCH

        case GT_ARR_ELEM:
            return LowerArrElem(node);

        case GT_ARR_OFFSET:
            ContainCheckArrOffset(node->AsArrOffs());
            break;

        case GT_ROL:
        case GT_ROR:
            LowerRotate(node);
            break;

#ifndef TARGET_64BIT
        case GT_LSH_HI:
        case GT_RSH_LO:
            ContainCheckShiftRotate(node->AsOp());
            break;
#endif // !TARGET_64BIT

        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
            LowerShift(node->AsOp());
#else
            ContainCheckShiftRotate(node->AsOp());
#endif
            break;

        case GT_STORE_BLK:
        case GT_STORE_OBJ:
            if (node->AsBlk()->Data()->IsCall())
            {
                LowerStoreSingleRegCallStruct(node->AsBlk());
                break;
            }
            FALLTHROUGH;

        case GT_STORE_DYN_BLK:
            LowerBlockStoreCommon(node->AsBlk());
            break;

        case GT_LCLHEAP:
            ContainCheckLclHeap(node->AsOp());
            break;

#ifdef TARGET_XARCH
        case GT_INTRINSIC:
            ContainCheckIntrinsic(node->AsOp());
            break;
#endif // TARGET_XARCH

#ifdef FEATURE_SIMD
        case GT_SIMD:
            LowerSIMD(node->AsSIMD());
            break;
#endif // FEATURE_SIMD

#ifdef FEATURE_HW_INTRINSICS
        case GT_HWINTRINSIC:
            LowerHWIntrinsic(node->AsHWIntrinsic());
            break;
#endif // FEATURE_HW_INTRINSICS

        case GT_LCL_FLD:
        {
            // We should only encounter this for lclVars that are lvDoNotEnregister.
            verifyLclFldDoNotEnregister(node->AsLclVarCommon()->GetLclNum());
            break;
        }

        case GT_LCL_VAR:
        {
            GenTreeLclVar* lclNode = node->AsLclVar();
            WidenSIMD12IfNecessary(lclNode);
            LclVarDsc* varDsc = comp->lvaGetDesc(lclNode);

            // The consumer of this node must check compatibility of the fields.
            // This merely checks whether it is possible for this to be a multireg node.
            if (lclNode->IsMultiRegLclVar())
            {
                if (!varDsc->lvPromoted ||
                    (comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT) ||
                    (varDsc->lvFieldCnt > MAX_MULTIREG_COUNT))
                {
                    lclNode->ClearMultiReg();
                    if (lclNode->TypeIs(TYP_STRUCT))
                    {
                        comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp));
                    }
                }
            }
            break;
        }

        case GT_STORE_LCL_VAR:
            WidenSIMD12IfNecessary(node->AsLclVarCommon());
            FALLTHROUGH;

        case GT_STORE_LCL_FLD:
            LowerStoreLocCommon(node->AsLclVarCommon());
            break;

#if defined(TARGET_ARM64)
        case GT_CMPXCHG:
            CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand);
            break;

        case GT_XORR:
        case GT_XAND:
        case GT_XADD:
            CheckImmedAndMakeContained(node, node->AsOp()->gtOp2);
            break;
#elif defined(TARGET_XARCH)
        case GT_XORR:
        case GT_XAND:
        case GT_XADD:
            if (node->IsUnusedValue())
            {
                node->ClearUnusedValue();
                // Make sure the types are identical, since the node type is changed to VOID
                // CodeGen relies on op2's type to determine the instruction size.
                // Note that the node type cannot be a small int but the data operand can.
                assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet());
                node->SetOper(GT_LOCKADD);
                node->gtType = TYP_VOID;
                CheckImmedAndMakeContained(node, node->gtGetOp2());
            }
            break;
#endif

#ifndef TARGET_ARMARCH
        // TODO-ARMARCH-CQ: We should contain this as long as the offset fits.
        case GT_OBJ:
            if (node->AsObj()->Addr()->OperIsLocalAddr())
            {
                node->AsObj()->Addr()->SetContained();
            }
            break;
#endif // !TARGET_ARMARCH

        case GT_KEEPALIVE:
            node->gtGetOp1()->SetRegOptional();
            break;

        case GT_LCL_FLD_ADDR:
        case GT_LCL_VAR_ADDR:
        {
            const GenTreeLclVarCommon* lclAddr = node->AsLclVarCommon();
            const LclVarDsc*           varDsc  = comp->lvaGetDesc(lclAddr);
            if (!varDsc->lvDoNotEnregister)
            {
                // TODO-Cleanup: this is definitely not the best place for this detection,
                // but for now it is the easiest. Move it to morph.
                comp->lvaSetVarDoNotEnregister(lclAddr->GetLclNum() DEBUGARG(DoNotEnregisterReason::LclAddrNode));
            }
        }
        break;

        default:
            break;
    }

    return node->gtNext;
}

/**  -- Switch Lowering --
 * The main idea of switch lowering is to keep transparency of the register requirements of this node
 * downstream in LSRA.
 * Given that the switch instruction is inherently a control statement which in the JIT
 * is represented as a simple tree node, at the time we actually generate code for it we end up
 * generating instructions that actually modify the flow of execution that imposes complicated
 * register requirement and lifetimes.
 *
 * So, for the purpose of LSRA, we want to have a more detailed specification of what a switch node actually
 * means and more importantly, which and when do we need a register for each instruction we want to issue
 * to correctly allocate them downstream.
 *
 * For this purpose, this procedure performs switch lowering in two different ways:
 *
 * a) Represent the switch statement as a zero-index jump table construct. This means that for every destination
 *    of the switch, we will store this destination in an array of addresses and the code generator will issue
 *    a data section where this array will live and will emit code that based on the switch index, will indirect and
 *    jump to the destination specified in the jump table.
 *
 *    For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch
 *    node for jump table based switches.
 *    The overall structure of a GT_SWITCH_TABLE is:
 *
 *    GT_SWITCH_TABLE
 *           |_________ localVar   (a temporary local that holds the switch index)
 *           |_________ jumpTable  (this is a special node that holds the address of the jump table array)
 *
 *    Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following:
 *
 *    Input:     GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH)
 *                    |_____ expr (an arbitrarily complex GT_NODE that represents the switch index)
 *
 *    This gets transformed into the following statements inside a BBJ_COND basic block (the target would be
 *    the default case of the switch in case the conditional is evaluated to true).
 *
 *    ----- original block, transformed
 *    GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index)
 *       |_____ expr      (the index expression)
 *
 *    GT_JTRUE
 *       |_____ GT_COND
 *                |_____ GT_GE
 *                          |___ Int_Constant  (This constant is the index of the default case
 *                                              that happens to be the highest index in the jump table).
 *                          |___ tempLocal     (The local variable where we stored the index expression).
 *
 *    ----- new basic block
 *    GT_SWITCH_TABLE
 *       |_____ tempLocal
 *       |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly
 *                         and LinearCodeGen will be responsible to generate downstream).
 *
 *    This way there are no implicit temporaries.
 *
 * b) For small-sized switches, we will actually morph them into a series of conditionals of the form
 *    if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case }
 *    (For the default case conditional, we'll be constructing the exact same code as the jump table case one).
 *    else if (case == firstCase){ goto jumpTable[1]; }
 *    else if (case == secondCase) { goto jumptable[2]; } and so on.
 *
 *    This transformation is of course made in JIT-IR, not downstream to CodeGen level, so this way we no longer
 *    require internal temporaries to maintain the index we're evaluating plus we're using existing code from
 *    LinearCodeGen to implement this instead of implementing all the control flow constructs using InstrDscs and
 *    InstrGroups downstream.
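 *
 *    As a small illustration of expansion (b) (a hypothetical three-case switch,
 *    not taken from a JIT dump), switch (x) { case 0: ...; case 1: ...; default: ... }
 *    lowers to roughly:
 *        if (x > 1)  goto default;  // the shared default-case conditional
 *        if (x == 0) goto case0;
 *        goto case1;                // the last case needs no compare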
 */

GenTree* Lowering::LowerSwitch(GenTree* node)
{
    unsigned     jumpCnt;
    unsigned     targetCnt;
    BasicBlock** jumpTab;

    assert(node->gtOper == GT_SWITCH);

    // The first step is to build the default case conditional construct that is
    // shared between both kinds of expansion of the switch node.

    // To avoid confusion, we'll alias m_block to originalSwitchBB
    // that represents the node we're morphing.
    BasicBlock* originalSwitchBB = m_block;
    LIR::Range& switchBBRange    = LIR::AsRange(originalSwitchBB);

    // jumpCnt is the number of elements in the jump table array.
    // jumpTab is the actual pointer to the jump table array.
    // targetCnt is the number of unique targets in the jump table array.
    jumpCnt   = originalSwitchBB->bbJumpSwt->bbsCount;
    jumpTab   = originalSwitchBB->bbJumpSwt->bbsDstTab;
    targetCnt = originalSwitchBB->NumSucc(comp);

// GT_SWITCH must be a top-level node with no use.
#ifdef DEBUG
    {
        LIR::Use use;
        assert(!switchBBRange.TryGetUse(node, &use));
    }
#endif

    JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt);

    // Handle a degenerate case: if the switch has only a default case, just convert it
    // to an unconditional branch. This should only happen in minopts or with debuggable
    // code.
    if (targetCnt == 1)
    {
        JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
        noway_assert(comp->opts.OptimizationDisabled());
        if (originalSwitchBB->bbNext == jumpTab[0])
        {
            originalSwitchBB->bbJumpKind = BBJ_NONE;
            originalSwitchBB->bbJumpDest = nullptr;
        }
        else
        {
            originalSwitchBB->bbJumpKind = BBJ_ALWAYS;
            originalSwitchBB->bbJumpDest = jumpTab[0];
        }
        // Remove extra predecessor links if there was more than one case.
        for (unsigned i = 1; i < jumpCnt; ++i)
        {
            (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB);
        }

        // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign
        // the result of the child subtree to a temp.
        GenTree* rhs = node->AsOp()->gtOp1;

        unsigned lclNum               = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable"));
        comp->lvaTable[lclNum].lvType = rhs->TypeGet();

        GenTreeLclVar* store = comp->gtNewStoreLclVar(lclNum, rhs);

        switchBBRange.InsertAfter(node, store);
        switchBBRange.Remove(node);

        return store;
    }

    noway_assert(jumpCnt >= 2);

    // Spill the argument to the switch node into a local so that it can be used later.
    LIR::Use use(switchBBRange, &(node->AsOp()->gtOp1), node);
    ReplaceWithLclVar(use);

    // GT_SWITCH(indexExpression) is now two statements:
    //   1. a statement containing 'asg' (for temp = indexExpression)
    //   2. and a statement with GT_SWITCH(temp)

    assert(node->gtOper == GT_SWITCH);
    GenTree* temp = node->AsOp()->gtOp1;
    assert(temp->gtOper == GT_LCL_VAR);
    unsigned  tempLclNum  = temp->AsLclVarCommon()->GetLclNum();
    var_types tempLclType = temp->TypeGet();

    BasicBlock* defaultBB   = jumpTab[jumpCnt - 1];
    BasicBlock* followingBB = originalSwitchBB->bbNext;

    /* Is the number of cases right for a test and jump switch? */
    const bool fFirstCaseFollows = (followingBB == jumpTab[0]);
    const bool fDefaultFollows   = (followingBB == defaultBB);

    unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc

    // This means really just a single cmp/jcc (aka a simple if/else)
    if (fFirstCaseFollows || fDefaultFollows)
    {
        minSwitchTabJumpCnt++;
    }

#if defined(TARGET_ARM)
    // On ARM for small switch tables we will
    // generate a sequence of compare and branch instructions
    // because the code to load the base of the switch
    // table is huge and hideous due to the relocation... :(
    minSwitchTabJumpCnt += 2;
#endif // TARGET_ARM

    // Once we have the temporary variable, we construct the conditional branch for
    // the default case.  As stated above, this conditional is being shared between
    // both GT_SWITCH lowering code paths.
    // This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
    GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
                                                     comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType)));

    // Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
    // is now less than zero (that would also hit the default case).
    gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;

    GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
    gtDefaultCaseJump->gtFlags = node->gtFlags;

    LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump);
    switchBBRange.InsertAtEnd(std::move(condRange));

    BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode());

    // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor.
    // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
    // representing the fall-through flow from originalSwitchBB.
    assert(originalSwitchBB->bbJumpKind == BBJ_NONE);
    assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
    assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH);
    assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
    assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.

    // The GT_SWITCH code is still in originalSwitchBB (it will be removed later).

    // Turn originalSwitchBB into a BBJ_COND.
    originalSwitchBB->bbJumpKind = BBJ_COND;
    originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];

    // Fix the pred for the default case: the default block target still has originalSwitchBB
    // as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point
    // to afterDefaultCondBlock.
    flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock);
    comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge);

    bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;

    if (TargetOS::IsUnix && TargetArchitecture::IsArm32)
    {
        // Force using an inlined jump sequence instead of switch table generation.
        // The switch jump table is generated with incorrect values in the CoreRT case,
        // so any large switch will crash after loading such a value into the PC.
        // I think this is due to the fact that we use absolute addressing
        // instead of relative. CoreRT, as a rule, uses relative addressing
        // when we generate an executable.
        // See also https://github.com/dotnet/runtime/issues/8683
        // Also https://github.com/dotnet/coreclr/pull/13197
        useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI);
    }

    // If we originally had 2 unique successors, check to see whether there is a unique
    // non-default case, in which case we can eliminate the switch altogether.
    // Note that the single unique successor case is handled above.
    BasicBlock* uniqueSucc = nullptr;
    if (targetCnt == 2)
    {
        uniqueSucc = jumpTab[0];
        noway_assert(jumpCnt >= 2);
        for (unsigned i = 1; i < jumpCnt - 1; i++)
        {
            if (jumpTab[i] != uniqueSucc)
            {
                uniqueSucc = nullptr;
                break;
            }
        }
    }
    if (uniqueSucc != nullptr)
    {
        // If the unique successor immediately follows this block, we have nothing to do -
        // it will simply fall-through after we remove the switch, below.
        // Otherwise, make this a BBJ_ALWAYS.
        // Now, fixup the predecessor links to uniqueSucc. In the original jumpTab:
        //   jumpTab[jumpCnt - 1] was the default target, which we handled above,
        //   jumpTab[0] is the first target, and we'll leave that predecessor link.
        // Remove any additional predecessor links to uniqueSucc.
        for (unsigned i = 1; i < jumpCnt - 1; ++i)
        {
            assert(jumpTab[i] == uniqueSucc);
            (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
        }
        if (afterDefaultCondBlock->bbNext == uniqueSucc)
        {
            afterDefaultCondBlock->bbJumpKind = BBJ_NONE;
            afterDefaultCondBlock->bbJumpDest = nullptr;
        }
        else
        {
            afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS;
            afterDefaultCondBlock->bbJumpDest = uniqueSucc;
        }
    }
    // If the number of possible destinations is small enough, we proceed to expand the switch
    // into a series of conditional branches, otherwise we follow the jump table based switch
    // transformation.
    else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50))
    {
        // Lower the switch into a series of compare and branch IR trees.
        //
        // In this case we will morph the node in the following way:
        // 1. Generate a JTRUE statement to evaluate the default case. (This happens above.)
        // 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain
        //    a statement that is responsible for performing a comparison of the table index and conditional
        //    branch if equal.

        JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum);

        // We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new
        // blocks. If we end up not needing it at all (say, if all the non-default cases just fall through),
        // we'll delete it.
        bool        fUsedAfterDefaultCondBlock = false;
        BasicBlock* currentBlock               = afterDefaultCondBlock;
        LIR::Range* currentBBRange             = &LIR::AsRange(currentBlock);

        // Walk entries 0 to jumpCnt - 1. If a case target follows, ignore it and let it fall through.
        // If no case target follows, the last one doesn't need to be a compare/branch: it can be an
        // unconditional branch.
        bool fAnyTargetFollows = false;
        for (unsigned i = 0; i < jumpCnt - 1; ++i)
        {
            assert(currentBlock != nullptr);

            // Remove the switch from the predecessor list of this case target's block.
            // We'll add the proper new predecessor edge later.
            flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock);

            if (jumpTab[i] == followingBB)
            {
                // This case label follows the switch; let it fall through.
                fAnyTargetFollows = true;
                continue;
            }

            // We need a block to put in the new compare and/or branch.
            // If we haven't used the afterDefaultCondBlock yet, then use that.
            if (fUsedAfterDefaultCondBlock)
            {
                BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true);
                comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
                currentBlock   = newBlock;
                currentBBRange = &LIR::AsRange(currentBlock);
            }
            else
            {
                assert(currentBlock == afterDefaultCondBlock);
                fUsedAfterDefaultCondBlock = true;
            }

            // We're going to have a branch, either a conditional or unconditional,
            // to the target. Set the target.
            currentBlock->bbJumpDest = jumpTab[i];

            // Wire up the predecessor list for the "branch" case.
            comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge);

            if (!fAnyTargetFollows && (i == jumpCnt - 2))
            {
                // We're processing the last one, and there is no fall through from any case
                // to the following block, so we can use an unconditional branch to the final
                // case: there is no need to compare against the case index, since it's
                // guaranteed to be taken (since the default case was handled first, above).

                currentBlock->bbJumpKind = BBJ_ALWAYS;
            }
            else
            {
                // Otherwise, it's a conditional branch. Set the branch kind, then add the
                // condition statement.
                currentBlock->bbJumpKind = BBJ_COND;

                // Now, build the conditional statement for the current case that is
                // being evaluated:
                // GT_JTRUE
                //   |__ GT_COND
                //          |____GT_EQ
                //                 |____ (switchIndex) (The temp variable)
                //                 |____ (ICon)        (The actual case constant)
                GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
                                                          comp->gtNewIconNode(i, tempLclType));
                GenTree*   gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond);
                LIR::Range caseRange    = LIR::SeqTree(comp, gtCaseBranch);
                currentBBRange->InsertAtEnd(std::move(caseRange));
            }
        }

        if (fAnyTargetFollows)
        {
            // There is a fall-through to the following block. In the loop
            // above, we deleted all the predecessor edges from the switch.
            // In this case, we need to add one back.
            comp->fgAddRefPred(currentBlock->bbNext, currentBlock);
        }

        if (!fUsedAfterDefaultCondBlock)
        {
            // All the cases were fall-through! We don't need this block.
            // Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag
            // so fgRemoveBlock() doesn't complain.
            JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum);
            assert(currentBlock == afterDefaultCondBlock);
            assert(currentBlock->bbJumpKind == BBJ_SWITCH);
            currentBlock->bbJumpKind = BBJ_NONE;
            currentBlock->bbFlags &= ~BBF_DONT_REMOVE;
            comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block.
        }
    }
    else
    {
        // At this point the default case has already been handled and we need to generate a jump
        // table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both
        // switch variants need the switch value so create the necessary LclVar node here.
        GenTree*    switchValue      = comp->gtNewLclvNode(tempLclNum, tempLclType);
        LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock);
        switchBlockRange.InsertAtEnd(switchValue);

        // Try generating a bit test based switch first,
        // if that's not possible a jump table based switch will be generated.
        if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue))
        {
            JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum);

#ifdef TARGET_64BIT
            if (tempLclType != TYP_I_IMPL)
            {
                // SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL.
                // Note that the switch value is unsigned so the cast should be unsigned as well.
                switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL);
                switchBlockRange.InsertAtEnd(switchValue);
            }
#endif

            GenTree* switchTable = comp->gtNewJmpTableNode();
            GenTree* switchJump  = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable);
            switchBlockRange.InsertAfter(switchValue, switchTable, switchJump);

            // this block no longer branches to the default block
            afterDefaultCondBlock->bbJumpSwt->removeDefault();
        }

        comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock);
    }

    GenTree* next = node->gtNext;

    // Get rid of the GT_SWITCH(temp).
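    // (The operand removed below is the temp LclVar use created by ReplaceWithLclVar
    // earlier; both it and the switch node itself are fully superseded by the
    // expansion built above.)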
    switchBBRange.Remove(node->AsOp()->gtOp1);
    switchBBRange.Remove(node);

    return next;
}

//------------------------------------------------------------------------
// TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test.
//
// Arguments:
//    jumpTable   - The jump table
//    jumpCount   - The number of blocks in the jump table
//    targetCount - The number of distinct blocks in the jump table
//    bbSwitch    - The switch block
//    switchValue - A LclVar node that provides the switch value
//
// Return value:
//    true if the switch has been lowered to a bit test
//
// Notes:
//    If the jump table contains less than 32 (64 on 64 bit targets) entries and there
//    are at most 2 distinct jump targets then the jump table can be converted to a word
//    of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the
//    other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump
//    to the appropriate target:
//        mov eax, 245 ; jump table converted to a "bit table"
//        bt  eax, ebx ; ebx is supposed to contain the switch value
//        jc target1
//      target0:
//        ...
//      target1:
//    Such code is both shorter and faster (in part due to the removal of a memory load)
//    than the traditional jump table based code. And of course, it also avoids the need
//    to emit the jump table itself that can reach up to 256 bytes (for 64 entries).
//
bool Lowering::TryLowerSwitchToBitTest(
    BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue)
{
#ifndef TARGET_XARCH
    // Other architectures may use this if they substitute GT_BT with equivalent code.
    return false;
#else
    assert(jumpCount >= 2);
    assert(targetCount >= 2);
    assert(bbSwitch->bbJumpKind == BBJ_SWITCH);
    assert(switchValue->OperIs(GT_LCL_VAR));

    //
    // Quick check to see if it's worth going through the jump table. The bit test switch supports
    // up to 2 targets but targetCount also includes the default block so we need to allow 3 targets.
    // We'll ensure that there are only 2 targets when building the bit table.
    //
    if (targetCount > 3)
    {
        return false;
    }

    //
    // The number of bits in the bit table is the same as the number of jump table entries. But the
    // jump table also includes the default target (at the end) so we need to ignore it. The default
    // has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates.
    //
    const unsigned bitCount = jumpCount - 1;

    if (bitCount > (genTypeSize(TYP_I_IMPL) * 8))
    {
        return false;
    }

    //
    // Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to
    // bbCase1. Simply use the first block in the jump table as bbCase1, later we can invert the bit
    // table and/or swap the blocks if it's beneficial.
    //

    BasicBlock* bbCase0  = nullptr;
    BasicBlock* bbCase1  = jumpTable[0];
    size_t      bitTable = 1;

    for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++)
    {
        if (jumpTable[bitIndex] == bbCase1)
        {
            bitTable |= (size_t(1) << bitIndex);
        }
        else if (bbCase0 == nullptr)
        {
            bbCase0 = jumpTable[bitIndex];
        }
        else if (jumpTable[bitIndex] != bbCase0)
        {
            // If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more
            // than 3 because of the check at the start of the function.
            assert(targetCount == 3);
            return false;
        }
    }

    //
    // One of the case blocks has to follow the switch block. This requirement could be avoided
    // by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively
    // impacts register allocation.
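    // (For example, if bbSwitch branches only to blocks B1 and B2 and neither is
    // bbSwitch's lexical successor, the JCC emitted below would have no valid
    // fall-through target; the block names here are purely illustrative.)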
    //
    if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
    {
        return false;
    }

#ifdef TARGET_64BIT
    //
    // See if we can avoid an 8 byte immediate on 64 bit targets. If all upper 32 bits are 1
    // then inverting the bit table will make them 0 so that the table now fits in 32 bits.
    // Note that this does not change the number of bits in the bit table, it just takes
    // advantage of the fact that loading a 32 bit immediate into a 64 bit register zero
    // extends the immediate value to 64 bit.
    //

    if (~bitTable <= UINT32_MAX)
    {
        bitTable = ~bitTable;
        std::swap(bbCase0, bbCase1);
    }
#endif

    //
    // Rewire the blocks as needed and figure out the condition to use for JCC.
    //

    GenCondition bbSwitchCondition;
    bbSwitch->bbJumpKind = BBJ_COND;

    comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
    comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);

    if (bbSwitch->bbNext == bbCase0)
    {
        // GenCondition::C generates JC so we jump to bbCase1 when the bit is set
        bbSwitchCondition    = GenCondition::C;
        bbSwitch->bbJumpDest = bbCase1;

        comp->fgAddRefPred(bbCase0, bbSwitch);
        comp->fgAddRefPred(bbCase1, bbSwitch);
    }
    else
    {
        assert(bbSwitch->bbNext == bbCase1);

        // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
        bbSwitchCondition    = GenCondition::NC;
        bbSwitch->bbJumpDest = bbCase0;

        comp->fgAddRefPred(bbCase0, bbSwitch);
        comp->fgAddRefPred(bbCase1, bbSwitch);
    }

    //
    // Append BT(bitTable, switchValue) and JCC(condition) to the switch block.
    //

    var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
    GenTree*  bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
    GenTree*  bitTest      = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue);
    bitTest->gtFlags |= GTF_SET_FLAGS;
    GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition);
    jcc->gtFlags |= GTF_USE_FLAGS;

    LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc);

    return true;
#endif // TARGET_XARCH
}

// NOTE: this method deliberately does not update the call arg table. It must only
// be used by NewPutArg and LowerArg; these functions are responsible for updating
// the call arg table as necessary.
void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast)
{
    assert(argSlot != nullptr);
    assert(*argSlot != nullptr);
    assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST));

    GenTree* arg = *argSlot;

    // Replace the argument with the putarg/copy
    *argSlot                       = putArgOrBitcast;
    putArgOrBitcast->AsOp()->gtOp1 = arg;

    // Insert the putarg/copy into the block
    BlockRange().InsertAfter(arg, putArgOrBitcast);
}

//------------------------------------------------------------------------
// NewPutArg: rewrites the tree to put an arg in a register or on the stack.
//
// Arguments:
//    call - the call whose arg is being rewritten.
//    arg  - the arg being rewritten.
//    info - the fgArgTabEntry information for the argument.
//    type - the type of the argument.
//
// Return Value:
//    The new tree that was created to put the arg in the right place
//    or the incoming arg if the arg tree was not rewritten.
//
// Assumptions:
//    call, arg, and info must be non-null.
//
// Notes:
//    For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
//    this method allocates a single GT_PUTARG_REG for a one eightbyte struct and a GT_FIELD_LIST
//    of two GT_PUTARG_REGs for a two eightbyte struct.
//
//    For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with
//    native struct passing (i.e. UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers
//    layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
//    (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
//
GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type)
{
    assert(call != nullptr);
    assert(arg != nullptr);
    assert(info != nullptr);

    GenTree* putArg = nullptr;

    bool isOnStack = (info->GetRegNum() == REG_STK);

#ifdef TARGET_ARMARCH
    // Mark contained when we pass struct
    // GT_FIELD_LIST is always marked contained when it is generated
    if (type == TYP_STRUCT)
    {
        arg->SetContained();
        if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR))
        {
            MakeSrcContained(arg, arg->AsObj()->Addr());
        }
    }
#endif

#if FEATURE_ARG_SPLIT
    // Struct can be split into register(s) and stack on ARM
    if (compFeatureArgSplit() && info->IsSplit())
    {
        assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST);
        // TODO: Need to check correctness for FastTailCall
        if (call->IsFastTailCall())
        {
#ifdef TARGET_ARM
            NYI_ARM("lower: struct argument by fast tail call");
#endif // TARGET_ARM
        }

        const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE;
        DEBUG_ARG_SLOTS_ASSERT(slotNumber == info->slotNum);
        const bool putInIncomingArgArea = call->IsFastTailCall();

        putArg = new (comp, GT_PUTARG_SPLIT)
            GenTreePutArgSplit(arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                               info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
                               slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                               info->GetStackByteSize(),
#endif
                               info->numRegs, call, putInIncomingArgArea);

        // If struct argument is morphed to GT_FIELD_LIST node(s),
        // we can know GC info by type of each GT_FIELD_LIST node.
        // So we skip setting GC Pointer info.
        //
        GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit();
        for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++)
        {
            argSplit->SetRegNumByIdx(info->GetRegNum(regIndex), regIndex);
        }

        if (arg->OperGet() == GT_OBJ)
        {
            ClassLayout* layout = arg->AsObj()->GetLayout();

            // Set type of registers
            for (unsigned index = 0; index < info->numRegs; index++)
            {
                argSplit->m_regType[index] = layout->GetGCPtrType(index);
            }
        }
        else
        {
            unsigned regIndex = 0;
            for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
            {
                if (regIndex >= info->numRegs)
                {
                    break;
                }
                var_types regType = use.GetNode()->TypeGet();
                // Account for the possibility that float fields may be passed in integer registers.
                if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(regIndex)))
                {
                    regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
                }
                argSplit->m_regType[regIndex] = regType;
                regIndex++;
            }

            // Clear the register assignment on the fieldList node, as these are contained.
            arg->SetRegNum(REG_NA);
        }
    }
    else
#endif // FEATURE_ARG_SPLIT
    {
        if (!isOnStack)
        {
#if FEATURE_MULTIREG_ARGS
            if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
            {
                unsigned int regIndex = 0;
                for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
                {
                    regNumber argReg = info->GetRegNum(regIndex);
                    GenTree*  curOp  = use.GetNode();
                    var_types curTyp = curOp->TypeGet();

                    // Create a new GT_PUTARG_REG node with op1
                    GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg);

                    // Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST
                    ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), newOper);
                    regIndex++;
                }

                // Just return arg. The GT_FIELD_LIST is not replaced.
                // Nothing more to do.
                return arg;
            }
            else
#endif // FEATURE_MULTIREG_ARGS
            {
                putArg = comp->gtNewPutArgReg(type, arg, info->GetRegNum());
            }
        }
        else
        {
            // Mark this one as tail call arg if it is a fast tail call.
            // This provides the info to put this argument in the incoming arg area slot
            // instead of in the outgoing arg area slot.
            CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
            // Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
            // a result. So the type of its operand must be the correct type to push on the stack.
            // For a FIELD_LIST, this will be the type of the field (not the type of the arg),
            // but otherwise it is generally the type of the operand.
            info->checkIsStruct();
#endif

            if ((arg->OperGet() != GT_FIELD_LIST))
            {
#if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                if (type == TYP_SIMD12)
                {
#if !defined(TARGET_64BIT)
                    assert(info->GetByteSize() == 12);
#else  // TARGET_64BIT
                    if (compMacOsArm64Abi())
                    {
                        assert(info->GetByteSize() == 12);
                    }
                    else
                    {
                        assert(info->GetByteSize() == 16);
                    }
#endif // TARGET_64BIT
                }
                else
#endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                {
                    assert(genActualType(arg->TypeGet()) == type);
                }
            }
            const unsigned slotNumber           = info->GetByteOffset() / TARGET_POINTER_SIZE;
            const bool     putInIncomingArgArea = call->IsFastTailCall();

            putArg =
                new (comp, GT_PUTARG_STK) GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                                                           info->GetStackByteSize(), slotNumber,
                                                           info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
                                                           slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
                                                           info->GetStackByteSize(),
#endif
                                                           call, putInIncomingArgArea);

#ifdef FEATURE_PUT_STRUCT_ARG_STK
            // If the ArgTabEntry indicates that this arg is a struct
            // get and store the number of slots that are references.
            // This is later used in the codegen for PUT_ARG_STK implementation
            // for struct to decide whether and how many single eight-byte copies
            // to be done (only for reference slots), so gcinfo is emitted.
            // For non-reference slots faster/smaller size instructions are used -
            // pair copying using XMM registers or rep mov instructions.
            if (info->isStruct)
            {
                // We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments.
                if (arg->OperIsLocal())
                {
                    // This must have a type with a known size (SIMD or has been morphed to a primitive type).
                    assert(arg->TypeGet() != TYP_STRUCT);
                }
                else if (arg->OperIs(GT_OBJ))
                {
                    assert(!varTypeIsSIMD(arg));

#ifdef TARGET_X86
                    // On x86 the VM lies about the type of a struct containing a pointer sized
                    // integer field by returning the type of its field as the type of struct.
                    // Such a struct can be passed in a register depending on its position in
                    // the parameter list. The VM does this unwrapping only one level and therefore
                    // a type like Struct Foo { Struct Bar { int f}} always needs to be
                    // passed on stack. Also, the VM doesn't lie about the type of such a struct
                    // when it is a field of another struct. That is, the VM doesn't lie about
                    // the type of Foo.Bar
                    //
                    // We now support the promotion of fields that are of type struct.
                    // However we only support a limited case where the struct field has a
                    // single field and that single field must be a scalar type. Say Foo.Bar
                    // field is getting passed as a parameter to a call, since it is a TYP_STRUCT,
                    // as per x86 ABI it should always be passed on stack. Therefore GenTree
                    // node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where
                    // local v1 could be a promoted field standing for Foo.Bar. Note that
                    // the type of v1 will be the type of field of Foo.Bar.f when Foo is
                    // promoted. That is v1 will be a scalar type. In this case we need to
                    // pass v1 on stack instead of in a register.
                    //
                    // TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is
                    // a scalar type and the width of GT_OBJ matches the type size of v1.
                    // Note that this cannot be done till call node arguments are morphed
                    // because we should not lose the fact that the type of argument is
                    // a struct so that the arg gets correctly marked to be passed on stack.
                    GenTree* objOp1 = arg->gtGetOp1();
                    if (objOp1->OperGet() == GT_LCL_VAR_ADDR)
                    {
                        unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum();
                        if (comp->lvaTable[lclNum].lvType != TYP_STRUCT)
                        {
                            comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));
                        }
                    }
#endif // TARGET_X86
                }
                else if (!arg->OperIs(GT_FIELD_LIST))
                {
#ifdef TARGET_ARM
                    assert((info->GetStackSlotsNumber() == 1) ||
                           ((arg->TypeGet() == TYP_DOUBLE) && (info->GetStackSlotsNumber() == 2)));
#else
                    assert(varTypeIsSIMD(arg) || (info->GetStackSlotsNumber() == 1));
#endif
                }
            }
#endif // FEATURE_PUT_STRUCT_ARG_STK
        }
    }

    JITDUMP("new node is : ");
    DISPNODE(putArg);
    JITDUMP("\n");

    if (arg->gtFlags & GTF_LATE_ARG)
    {
        putArg->gtFlags |= GTF_LATE_ARG;
    }

    return putArg;
}

//------------------------------------------------------------------------
// LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between
// the argument evaluation and the call. This is the point at which the source is
// consumed and the value transitions from control of the register allocator to the calling
// convention.
//
// Arguments:
//    call  - The call node
//    ppArg - Pointer to the call argument pointer. We might replace the call argument by
//            changing *ppArg.
//
// Return Value:
//    None.
//
void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
{
    GenTree* arg = *ppArg;

    JITDUMP("lowering arg : ");
    DISPNODE(arg);

    // No assignments should remain by Lowering.
    assert(!arg->OperIs(GT_ASG));
    assert(!arg->OperIsPutArgStk());

    // Assignments/stores at this level are not really placing an argument.
    // They are setting up temporary locals that will later be placed into
    // outgoing regs or stack.
    // Note that atomic ops may be stores and still produce a value.
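    // (For instance, GT_XADD both stores to its address operand and produces the
    // original value, so it is treated as a value here when its result is used.)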
    if (!arg->IsValue())
    {
        assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
               arg->OperIsCopyBlkOp());
        return;
    }

    fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg);
    assert(info->GetNode() == arg);
    var_types type = arg->TypeGet();

    if (varTypeIsSmall(type))
    {
        // Normalize 'type', it represents the item that we will be storing in the Outgoing Args
        type = TYP_INT;
    }

#if defined(FEATURE_SIMD)
#if defined(TARGET_X86)
    // Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their
    // allocated size (see lvSize()). However, when passing the variables as arguments, and
    // storing the variables to the outgoing argument area on the stack, we must use their
    // actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written.
    if (type == TYP_SIMD16)
    {
        if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR))
        {
            const LclVarDsc* varDsc = comp->lvaGetDesc(arg->AsLclVarCommon());
            type                    = varDsc->lvType;
        }
        else if (arg->OperIs(GT_SIMD, GT_HWINTRINSIC))
        {
            GenTreeJitIntrinsic* jitIntrinsic = reinterpret_cast<GenTreeJitIntrinsic*>(arg);

            // For HWIntrinsic, there are some intrinsics like ExtractVector128 which have
            // a gtType of TYP_SIMD16 but a SimdSize of 32, so we need to include that in
            // the assert below.

            assert((jitIntrinsic->GetSimdSize() == 12) || (jitIntrinsic->GetSimdSize() == 16) ||
                   (jitIntrinsic->GetSimdSize() == 32));

            if (jitIntrinsic->GetSimdSize() == 12)
            {
                type = TYP_SIMD12;
            }
        }
    }
#elif defined(TARGET_AMD64)
    // TYP_SIMD8 parameters that are passed as longs
    if (type == TYP_SIMD8 && genIsValidIntReg(info->GetRegNum()))
    {
        GenTree* bitcast = comp->gtNewBitCastNode(TYP_LONG, arg);
        BlockRange().InsertAfter(arg, bitcast);

        *ppArg = arg = bitcast;
        assert(info->GetNode() == arg);
        type = TYP_LONG;
    }
#endif // defined(TARGET_X86)
#endif // defined(FEATURE_SIMD)

    // If we hit this we are probably double-lowering.
    assert(!arg->OperIsPutArg());

#if !defined(TARGET_64BIT)
    if (varTypeIsLong(type))
    {
        noway_assert(arg->OperIs(GT_LONG));
        GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList();
        fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp1(), 0, TYP_INT);
        fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp2(), 4, TYP_INT);
        GenTree* newArg = NewPutArg(call, fieldList, info, type);

        if (info->GetRegNum() != REG_STK)
        {
            assert(info->numRegs == 2);
            // In the register argument case, NewPutArg replaces the original field list args with new
            // GT_PUTARG_REG nodes, inserts them in linear order and returns the field list. So the
            // only thing left to do is to insert the field list itself in linear order.
            assert(newArg == fieldList);
            BlockRange().InsertBefore(arg, newArg);
        }
        else
        {
            // For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK.
            // Although the hi argument needs to be pushed first, that will be handled by the general case,
            // in which the fields will be reversed.
            assert(info->numSlots == 2);
            newArg->SetRegNum(REG_STK);
            BlockRange().InsertBefore(arg, fieldList, newArg);
        }

        *ppArg = newArg;
        assert(info->GetNode() == newArg);

        BlockRange().Remove(arg);
    }
    else
#endif // !defined(TARGET_64BIT)
    {
#ifdef TARGET_ARMARCH
        if (call->IsVarargs() || comp->opts.compUseSoftFP)
        {
            // For vararg call or on armel, reg args should be all integer.
            // Insert copies as needed to move float value to integer register.
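            // E.g., a TYP_FLOAT arg bound for a register becomes BITCAST<int>(arg)
            // placed in the corresponding integer register (see LowerFloatArgReg below).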
            GenTree* newNode = LowerFloatArg(ppArg, info);
            if (newNode != nullptr)
            {
                type = newNode->TypeGet();
            }
        }
#endif // TARGET_ARMARCH

        GenTree* putArg = NewPutArg(call, arg, info, type);

        // In the case of register passable struct (in one or two registers)
        // the NewPutArg returns a new node (GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs.)
        // If an extra node is returned, splice it in the right place in the tree.
        if (arg != putArg)
        {
            ReplaceArgWithPutArgOrBitcast(ppArg, putArg);
        }
    }
}

#ifdef TARGET_ARMARCH
//------------------------------------------------------------------------
// LowerFloatArg: Lower float call arguments on the arm platform.
//
// Arguments:
//    arg  - The arg node
//    info - call argument info
//
// Return Value:
//    Return nullptr, if no transformation was done;
//    return arg if there was in place transformation;
//    return a new tree if the root was changed.
//
// Notes:
//    This must handle scalar float arguments as well as GT_FIELD_LISTs
//    with floating point fields.
//
GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info)
{
    GenTree* arg = *pArg;
    if (info->GetRegNum() != REG_STK)
    {
        if (arg->OperIs(GT_FIELD_LIST))
        {
            // Transform fields that are passed as registers in place.
            regNumber currRegNumber = info->GetRegNum();
            unsigned  regIndex      = 0;
            for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
            {
                if (regIndex >= info->numRegs)
                {
                    break;
                }
                GenTree* node = use.GetNode();
                if (varTypeIsFloating(node))
                {
                    GenTree* intNode = LowerFloatArgReg(node, currRegNumber);
                    assert(intNode != nullptr);

                    ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), intNode);
                }

                if (node->TypeGet() == TYP_DOUBLE)
                {
                    currRegNumber = REG_NEXT(REG_NEXT(currRegNumber));
                    regIndex += 2;
                }
                else
                {
                    currRegNumber = REG_NEXT(currRegNumber);
                    regIndex += 1;
                }
            }
            // List fields were replaced in place.
            return arg;
        }
        else if (varTypeIsFloating(arg))
        {
            GenTree* intNode = LowerFloatArgReg(arg, info->GetRegNum());
            assert(intNode != nullptr);
            ReplaceArgWithPutArgOrBitcast(pArg, intNode);
            return *pArg;
        }
    }
    return nullptr;
}

//------------------------------------------------------------------------
// LowerFloatArgReg: Lower the float call argument node that is passed via register.
//
// Arguments:
//    arg    - The arg node
//    regNum - register number
//
// Return Value:
//    Return new bitcast node, that moves float to int register.
//
GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum)
{
    var_types floatType = arg->TypeGet();
    assert(varTypeIsFloating(floatType));
    var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
    GenTree*  intArg  = comp->gtNewBitCastNode(intType, arg);
    intArg->SetRegNum(regNum);
#ifdef TARGET_ARM
    if (floatType == TYP_DOUBLE)
    {
        // A special case when we introduce TYP_LONG
        // during lowering for arm32 softFP to pass double
        // in int registers.
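        // E.g., a TYP_DOUBLE destined for r0 becomes BITCAST<long>(arg) with
        // gtOtherReg set to r1 below, so the two 32-bit halves land in the r0/r1 pair.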
        assert(comp->opts.compUseSoftFP);

        regNumber nextReg                  = REG_NEXT(regNum);
        intArg->AsMultiRegOp()->gtOtherReg = nextReg;
    }
#endif
    return intArg;
}
#endif

// do lowering steps for each arg of a call
void Lowering::LowerArgsForCall(GenTreeCall* call)
{
    JITDUMP("objp:\n======\n");
    if (call->gtCallThisArg != nullptr)
    {
        LowerArg(call, &call->gtCallThisArg->NodeRef());
    }

    JITDUMP("\nargs:\n======\n");
    for (GenTreeCall::Use& use : call->Args())
    {
        LowerArg(call, &use.NodeRef());
    }

    JITDUMP("\nlate:\n======\n");
    for (GenTreeCall::Use& use : call->LateArgs())
    {
        LowerArg(call, &use.NodeRef());
    }
}

// helper that create a node representing a relocatable physical address computation
GenTree* Lowering::AddrGen(ssize_t addr)
{
    // this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
    GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
    return result;
}

// variant that takes a void*
GenTree* Lowering::AddrGen(void* addr)
{
    return AddrGen((ssize_t)addr);
}

// do lowering steps for a call
// this includes:
//   - adding the placement nodes (either stack or register variety) for arguments
//   - lowering the expression that calculates the target address
//   - adding nodes for other operations that occur after the call sequence starts and before
//     control transfer occurs (profiling and tail call helpers, pinvoke incantations)
//
void Lowering::LowerCall(GenTree* node)
{
    GenTreeCall* call = node->AsCall();

    JITDUMP("lowering call (before):\n");
    DISPTREERANGE(BlockRange(), call);
    JITDUMP("\n");

    call->ClearOtherRegs();
    LowerArgsForCall(call);

    // note that everything generated from this point might run AFTER the outgoing args are placed
    GenTree* controlExpr          = nullptr;
    bool     callWasExpandedEarly = false;

    // for x86, this is where we record ESP for checking later to make sure stack is balanced

    // Check for Delegate.Invoke(). If so, we inline it. We get the
    // target-object and target-function from the delegate-object, and do
    // an indirect call.
    if (call->IsDelegateInvoke())
    {
        controlExpr = LowerDelegateInvoke(call);
    }
    else
    {
        // Virtual and interface calls
        switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
        {
            case GTF_CALL_VIRT_STUB:
                controlExpr = LowerVirtualStubCall(call);
                break;

            case GTF_CALL_VIRT_VTABLE:
                assert(call->IsVirtualVtable());
                if (!call->IsExpandedEarly())
                {
                    assert(call->gtControlExpr == nullptr);
                    controlExpr = LowerVirtualVtableCall(call);
                }
                else
                {
                    callWasExpandedEarly = true;
                    controlExpr          = call->gtControlExpr;
                }
                break;

            case GTF_CALL_NONVIRT:
                if (call->IsUnmanaged())
                {
                    controlExpr = LowerNonvirtPinvokeCall(call);
                }
                else if (call->gtCallType == CT_INDIRECT)
                {
                    controlExpr = LowerIndirectNonvirtCall(call);
                }
                else
                {
                    controlExpr = LowerDirectCall(call);
                }
                break;

            default:
                noway_assert(!"strange call type");
                break;
        }
    }

    // Indirect calls should always go through GenTreeCall::gtCallAddr and
    // should never have a control expression as well.
    assert((call->gtCallType != CT_INDIRECT) || (controlExpr == nullptr));

    if (call->IsTailCallViaJitHelper())
    {
        // Either controlExpr or gtCallAddr must contain the real call target.
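        // (For CT_INDIRECT calls the target lives in gtCallAddr and no controlExpr was
        // produced above; direct and virtual calls produced one in the switch earlier.)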
if (controlExpr == nullptr) { assert(call->gtCallType == CT_INDIRECT); assert(call->gtCallAddr != nullptr); controlExpr = call->gtCallAddr; } controlExpr = LowerTailCallViaJitHelper(call, controlExpr); } // Check if we need to thread a newly created controlExpr into the LIR // if ((controlExpr != nullptr) && !callWasExpandedEarly) { LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr); JITDUMP("results of lowering call:\n"); DISPRANGE(controlExprRange); ContainCheckRange(controlExprRange); BlockRange().InsertBefore(call, std::move(controlExprRange)); call->gtControlExpr = controlExpr; } if (comp->opts.IsCFGEnabled()) { LowerCFGCall(call); } if (call->IsFastTailCall()) { // Lower fast tail call can introduce new temps to set up args correctly for Callee. // This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding Caller stack args // and replacing them with a new temp. Control expr also can contain nodes that need // to be patched. // Therefore lower fast tail call must be done after controlExpr is inserted into LIR. // There is one side effect which is flipping the order of PME and control expression // since LowerFastTailCall calls InsertPInvokeMethodEpilog. LowerFastTailCall(call); } if (varTypeIsStruct(call)) { LowerCallStruct(call); } ContainCheckCallOperands(call); JITDUMP("lowering call (after):\n"); DISPTREERANGE(BlockRange(), call); JITDUMP("\n"); } // Inserts profiler hook, GT_PROF_HOOK for a tail call node. // // AMD64: // We need to insert this after all nested calls, but before all the arguments to this call have been set up. // To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before // that. If there are no args, then it should be inserted before the call node. // // For example: // * stmtExpr void (top level) (IL 0x000...0x010) // arg0 SETUP | /--* argPlace ref REG NA $c5 // this in rcx | | /--* argPlace ref REG NA $c1 // | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2 // arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2 // | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2 // arg1 in rdx | | +--* putarg_reg ref REG NA // | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80 // this in rcx | | +--* putarg_reg ref REG NA // | | /--* call nullcheck ref System.String.ToLower $c5 // | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? ) // | | { \--* prof_hook void REG NA // arg0 in rcx | +--* putarg_reg ref REG NA // control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA // \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void // // In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call // (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call. // // X86: // Insert the profiler hook immediately before the call. The profiler hook will preserve // all argument registers (ECX, EDX), but nothing else. // // Params: // callNode - tail call node // insertionPoint - if non-null, insert the profiler hook before this point. // If null, insert the profiler hook before args are setup // but after all arg side effects are computed. 
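// For example (an illustrative ordering, not an actual dump): for a tail call with one stack argument // the hook ends up as ... <arg side effects> ... PROF_HOOK, PUTARG_STK, CALL, so it runs before any // outgoing argument is placed.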
// void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint) { assert(call->IsTailCall()); assert(comp->compIsProfilerHookNeeded()); #if defined(TARGET_X86) if (insertionPoint == nullptr) { insertionPoint = call; } #else // !defined(TARGET_X86) if (insertionPoint == nullptr) { for (GenTreeCall::Use& use : call->Args()) { assert(!use.GetNode()->OperIs(GT_PUTARG_REG)); // We don't expect to see these in gtCallArgs if (use.GetNode()->OperIs(GT_PUTARG_STK)) { // found it insertionPoint = use.GetNode(); break; } } if (insertionPoint == nullptr) { for (GenTreeCall::Use& use : call->LateArgs()) { if (use.GetNode()->OperIs(GT_PUTARG_REG, GT_PUTARG_STK)) { // found it insertionPoint = use.GetNode(); break; } } // If there are no args, insert before the call node if (insertionPoint == nullptr) { insertionPoint = call; } } } #endif // !defined(TARGET_X86) assert(insertionPoint != nullptr); GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID); BlockRange().InsertBefore(insertionPoint, profHookNode); } //------------------------------------------------------------------------ // LowerFastTailCall: Lower a call node dispatched as a fast tailcall (epilog + // jmp). // // Arguments: // call - the call node that is being dispatched as a fast tailcall. // // Assumptions: // call must be non-null. // // Notes: // For fast tail calls it is necessary to set up stack args in the incoming // arg stack space area. When args passed also come from this area we may // run into problems because we may end up overwriting the stack slot before // using it. For example, for foo(a, b) { return bar(b, a); }, if a and b // are on incoming arg stack space in foo they need to be swapped in this // area for the call to bar. This function detects this situation and // introduces a temp when an outgoing argument would overwrite a later-used // incoming argument. // // This function also handles inserting necessary profiler hooks and pinvoke // method epilogs in case there are inlined pinvokes. void Lowering::LowerFastTailCall(GenTreeCall* call) { #if FEATURE_FASTTAILCALL // Tail call restrictions i.e. conditions under which tail prefix is ignored. // Most of these checks are already done by importer or fgMorphTailCall(). // This serves as a double sanity check. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods assert(!comp->opts.IsReversePInvoke()); // tail calls reverse pinvoke assert(!call->IsUnmanaged()); // tail calls to unmanaged methods assert(!comp->compLocallocUsed); // tail call from methods that also do localloc #ifdef TARGET_AMD64 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check #endif // TARGET_AMD64 // We expect to see a call that meets the following conditions assert(call->IsFastTailCall()); // VM cannot use return address hijacking when A() and B() tail call each // other in mutual recursion. Therefore, this block is reachable through // a GC-safe point or the whole method is marked as fully interruptible. // // TODO-Cleanup: // optReachWithoutCall() depends on the fact that loop header blocks // will have a block number > fgLastBB. These loop headers get added // after dominator computation and get skipped by OptReachWithoutCall(). // The below condition cannot be asserted in lower because fgSimpleLowering() // can add a new basic block for range check failure which becomes // fgLastBB with block number > loop header block number.
// assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || // !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->GetInterruptible()); // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that // a method returns. This is the case where the caller method has both PInvokes and tail calls. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call)); } // Args for tail call are setup in incoming arg area. The gc-ness of args of // caller and callee (which is being tail called) may not match. Therefore, everything // from arg setup until the epilog needs to be non-interruptible by GC. This is // achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node // of call is setup. Note that once a stack arg is setup, it cannot have nested // calls subsequently in execution order to setup other args, because the nested // call could overwrite the stack arg that is setup earlier. ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack)); for (GenTreeCall::Use& use : call->Args()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { putargs.Push(use.GetNode()); } } for (GenTreeCall::Use& use : call->LateArgs()) { if (use.GetNode()->OperIs(GT_PUTARG_STK)) { putargs.Push(use.GetNode()); } } GenTree* startNonGCNode = nullptr; if (!putargs.Empty()) { // Get the earliest operand of the first PUTARG_STK node. We will make // the required copies of args before this node. bool unused; GenTree* insertionPoint = BlockRange().GetTreeRange(putargs.Bottom(), &unused).FirstNode(); // Insert GT_START_NONGC node before we evaluate the PUTARG_STK args. // Note that if there are no args to be setup on stack, no need to // insert GT_START_NONGC node. startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID); BlockRange().InsertBefore(insertionPoint, startNonGCNode); // GC interruptibility in the following case: // foo(a, b, c, d, e) { bar(a, b, c, d, e); } // bar(a, b, c, d, e) { foo(a, b, d, d, e); } // // Since the instruction group starting from the instruction that sets up first // stack arg to the end of the tail call is marked as non-gc interruptible, // this will form a non-interruptible tight loop causing gc-starvation. To fix // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method // has a single basic block and is not a GC-safe point. The presence of a single // nop outside non-gc interruptible region will prevent gc starvation. if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT)) { assert(comp->fgFirstBB == comp->compCurBB); GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); BlockRange().InsertBefore(startNonGCNode, noOp); } // Since this is a fast tailcall each PUTARG_STK will place the argument in the // _incoming_ arg space area. This will effectively overwrite our already existing // incoming args that live in that area. If we have later uses of those args, this // is a problem. We introduce a defensive copy into a temp here of those args that // potentially may cause problems.
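// For example (an illustrative sketch with made-up offsets): if a PUTARG_STK writes bytes [0, 8) of the // incoming arg area and caller arg V01 lives at bytes [4, 12), the ranges overlap, so V01 is copied // first: tmp = V01; ... PUTARG_STK [0, 8) ...; later uses of V01 are redirected to tmp.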
for (int i = 0; i < putargs.Height(); i++) { GenTreePutArgStk* put = putargs.Bottom(i)->AsPutArgStk(); unsigned int overwrittenStart = put->getArgOffset(); unsigned int overwrittenEnd = overwrittenStart + put->GetStackByteSize(); int baseOff = -1; // Stack offset of first arg on stack for (unsigned callerArgLclNum = 0; callerArgLclNum < comp->info.compArgsCount; callerArgLclNum++) { LclVarDsc* callerArgDsc = comp->lvaGetDesc(callerArgLclNum); if (callerArgDsc->lvIsRegArg) { continue; } unsigned int argStart; unsigned int argEnd; #if defined(TARGET_AMD64) if (TargetOS::IsWindows) { // On Windows x64, the argument position determines the stack slot uniquely, and even the // register args take up space in the stack frame (shadow space). argStart = callerArgLclNum * TARGET_POINTER_SIZE; argEnd = argStart + static_cast<unsigned int>(callerArgDsc->lvArgStackSize()); } else #endif // TARGET_AMD64 { assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS); if (baseOff == -1) { baseOff = callerArgDsc->GetStackOffset(); } // On all ABIs where we fast tail call the stack args should come in order. assert(baseOff <= callerArgDsc->GetStackOffset()); // Compute offset of this stack argument relative to the first stack arg. // This will be its offset into the incoming arg space area. argStart = static_cast<unsigned int>(callerArgDsc->GetStackOffset() - baseOff); argEnd = argStart + comp->lvaLclSize(callerArgLclNum); } // If ranges do not overlap then this PUTARG_STK will not mess up the arg. if ((overwrittenEnd <= argStart) || (overwrittenStart >= argEnd)) { continue; } // Codegen cannot handle a partially overlapping copy. For // example, if we have // bar(S16 stack, S32 stack2) // foo(S32 stack, S32 stack2) { bar(..., stack) } // then we may end up having to move 'stack' in foo 16 bytes // ahead. It is possible that this PUTARG_STK is the only use, // in which case we will need to introduce a temp, so look for // uses starting from it. Note that we assume that in-place // copies are OK. GenTree* lookForUsesFrom = put->gtNext; if (overwrittenStart != argStart) { lookForUsesFrom = insertionPoint; } RehomeArgForFastTailCall(callerArgLclNum, insertionPoint, lookForUsesFrom, call); // The above call can introduce temps and invalidate the pointer. callerArgDsc = comp->lvaGetDesc(callerArgLclNum); // For promoted locals we have more work to do as its fields could also have been invalidated. if (!callerArgDsc->lvPromoted) { continue; } unsigned int fieldsFirst = callerArgDsc->lvFieldLclStart; unsigned int fieldsEnd = fieldsFirst + callerArgDsc->lvFieldCnt; for (unsigned int j = fieldsFirst; j < fieldsEnd; j++) { RehomeArgForFastTailCall(j, insertionPoint, lookForUsesFrom, call); } } } } // Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be // inserted before the args are setup but after the side effects of args are // computed. That is, GT_PROF_HOOK node needs to be inserted before GT_START_NONGC // node if one exists. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, startNonGCNode); } #else // !FEATURE_FASTTAILCALL // Platform does not implement fast tail call mechanism. This cannot be // reached because we always choose to do a tailcall via helper on those // platforms (or no tailcall at all). unreached(); #endif } // //------------------------------------------------------------------------ // RehomeArgForFastTailCall: Introduce temps for args that may be overwritten // during fast tailcall sequence. 
// // Arguments: // lclNum - the lcl num of the arg that will be overwritten. // insertTempBefore - the node at which to copy the arg into a temp. // lookForUsesStart - the node where to start scanning and replacing uses of // the arg specified by lclNum. // callNode - the call node that is being dispatched as a fast tailcall. // // Assumptions: // all args must be non-null. // // Notes: // This function scans for uses of the arg specified by lclNum starting // from the lookForUsesStart node. If it finds any uses it introduces a temp // for this argument and updates uses to use this instead. In the situation // where it introduces a temp it can thus invalidate pointers to other // locals. // void Lowering::RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode) { unsigned int tmpLclNum = BAD_VAR_NUM; for (GenTree* treeNode = lookForUsesStart; treeNode != callNode; treeNode = treeNode->gtNext) { if (!treeNode->OperIsLocal() && !treeNode->OperIsLocalAddr()) { continue; } GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon(); if (lcl->GetLclNum() != lclNum) { continue; } // Create tmp and use it in place of callerArgDsc if (tmpLclNum == BAD_VAR_NUM) { tmpLclNum = comp->lvaGrabTemp(true DEBUGARG("Fast tail call lowering is creating a new local variable")); LclVarDsc* callerArgDsc = comp->lvaGetDesc(lclNum); var_types tmpTyp = genActualType(callerArgDsc->TypeGet()); comp->lvaTable[tmpLclNum].lvType = tmpTyp; // TODO-CQ: I don't see why we should copy doNotEnreg. comp->lvaTable[tmpLclNum].lvDoNotEnregister = callerArgDsc->lvDoNotEnregister; #ifdef DEBUG comp->lvaTable[tmpLclNum].SetDoNotEnregReason(callerArgDsc->GetDoNotEnregReason()); #endif // DEBUG GenTree* value; #ifdef TARGET_ARM if (tmpTyp == TYP_LONG) { GenTree* loResult = comp->gtNewLclFldNode(lclNum, TYP_INT, 0); GenTree* hiResult = comp->gtNewLclFldNode(lclNum, TYP_INT, 4); value = new (comp, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loResult, hiResult); } else #endif // TARGET_ARM { value = comp->gtNewLclvNode(lclNum, tmpTyp); } if (tmpTyp == TYP_STRUCT) { comp->lvaSetStruct(tmpLclNum, comp->lvaGetStruct(lclNum), false); } GenTreeLclVar* storeLclVar = comp->gtNewStoreLclVar(tmpLclNum, value); BlockRange().InsertBefore(insertTempBefore, LIR::SeqTree(comp, storeLclVar)); ContainCheckRange(value, storeLclVar); LowerNode(storeLclVar); } lcl->SetLclNum(tmpLclNum); } } //------------------------------------------------------------------------ // LowerTailCallViaJitHelper: lower a call via the tailcall JIT helper. Morph // has already inserted tailcall helper special arguments. This function inserts // actual data for some placeholders. This function is only used on x86. // // Lower // tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg) // as // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // Note that the special arguments are on the stack, whereas the function arguments follow the normal convention. // // Also inserts PInvoke method epilog if required. // // Arguments: // call - The call node // callTarget - The real call target. This is used to replace the dummyArg during lowering. // // Return Value: // Returns control expression tree for making a call to helper Jit_TailCall. // GenTree* Lowering::LowerTailCallViaJitHelper(GenTreeCall* call, GenTree* callTarget) { // Tail call restrictions i.e. conditions under which tail prefix is ignored. 
// Most of these checks are already done by importer or fgMorphTailCall(). // This serves as a double sanity check. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods assert(!call->IsUnmanaged()); // tail calls to unmanaged methods assert(!comp->compLocallocUsed); // tail call from methods that also do localloc // We expect to see a call that meets the following conditions assert(call->IsTailCallViaJitHelper()); assert(callTarget != nullptr); // The TailCall helper call never returns to the caller and is not GC interruptible. // Therefore the block containing the tail call should be a GC safe point to avoid // GC starvation. It is legal for the block to be unmarked iff the entry block is a // GC safe point, as the entry block trivially dominates every reachable block. assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT)); // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that // a method returns. This is the case where the caller method has both PInvokes and tail calls. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call)); } // Remove gtCallAddr from execution order if present. if (call->gtCallType == CT_INDIRECT) { assert(call->gtCallAddr != nullptr); bool isClosed; LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed); assert(isClosed); BlockRange().Remove(std::move(callAddrRange)); } // The callTarget tree needs to be sequenced. LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget); // Verify the special args are what we expect, and replace the dummy args with real values. // We need to figure out the size of the outgoing stack arguments, not including the special args. // The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes. // This number is exactly the next slot number in the call's argument info struct. unsigned nNewStkArgsBytes = call->fgArgInfo->GetNextSlotByteOffset(); const int wordSize = 4; unsigned nNewStkArgsWords = nNewStkArgsBytes / wordSize; DEBUG_ARG_SLOTS_ASSERT(call->fgArgInfo->GetNextSlotNum() == nNewStkArgsWords); assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args. nNewStkArgsWords -= 4; unsigned numArgs = call->fgArgInfo->ArgCount(); fgArgTabEntry* argEntry; // arg 0 == callTarget. argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1); assert(argEntry != nullptr); GenTree* arg0 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); ContainCheckRange(callTargetRange); BlockRange().InsertAfter(arg0, std::move(callTargetRange)); bool isClosed; LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed); assert(isClosed); BlockRange().Remove(std::move(secondArgRange)); argEntry->GetNode()->AsPutArgStk()->gtOp1 = callTarget; // arg 1 == flags argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2); assert(argEntry != nullptr); GenTree* arg1 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg1->gtOper == GT_CNS_INT); ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX (call->IsVirtualStub() ?
0x2 : 0x0); // Stub dispatch flag arg1->AsIntCon()->gtIconVal = tailCallHelperFlags; // arg 2 == numberOfNewStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3); assert(argEntry != nullptr); GenTree* arg2 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg2->gtOper == GT_CNS_INT); arg2->AsIntCon()->gtIconVal = nNewStkArgsWords; #ifdef DEBUG // arg 3 == numberOfOldStackArgsWords argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4); assert(argEntry != nullptr); GenTree* arg3 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1(); assert(arg3->gtOper == GT_CNS_INT); #endif // DEBUG // Transform this call node into a call to Jit tail call helper. call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; // Lower this as if it were a pure helper call. call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER); GenTree* result = LowerDirectCall(call); // Now add back tail call flags for identifying this node as tail call dispatched via helper. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; #ifdef PROFILING_SUPPORTED // Insert profiler tail call hook if needed. // Since we don't know the insertion point, pass null for second param. if (comp->compIsProfilerHookNeeded()) { InsertProfTailCallHook(call, nullptr); } #endif // PROFILING_SUPPORTED return result; } //------------------------------------------------------------------------ // LowerCFGCall: Potentially lower a call to use control-flow guard. This // expands indirect calls into either a validate+call sequence or to a dispatch // helper taking the original target in a special register. // // Arguments: // call - The call node // void Lowering::LowerCFGCall(GenTreeCall* call) { assert(!call->IsHelperCall(comp, CORINFO_HELP_DISPATCH_INDIRECT_CALL)); if (call->IsHelperCall(comp, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { return; } GenTree* callTarget = call->gtCallType == CT_INDIRECT ? call->gtCallAddr : call->gtControlExpr; if ((callTarget == nullptr) || callTarget->IsIntegralConst()) { // This is a direct call, no CFG check is necessary. return; } CFGCallKind cfgKind = call->GetCFGCallKind(); switch (cfgKind) { case CFGCallKind::ValidateAndCall: { // To safely apply CFG we need to generate a very specific pattern: // in particular, it is a safety issue to allow the JIT to reload // the call target from memory between calling // CORINFO_HELP_VALIDATE_INDIRECT_CALL and the target. This is // something that would easily occur in debug codegen if we // produced high-level IR. Instead we will use a GT_PHYSREG node // to get the target back from the register that contains the target. // // Additionally, the validator does not preserve all arg registers, // so we have to move all GT_PUTARG_REG nodes that would otherwise // be trashed ahead. The JIT also has an internal invariant that // once GT_PUTARG nodes start to appear in LIR, the call is coming // up. To avoid breaking this invariant we move _all_ GT_PUTARG // nodes (in particular, GC info reporting relies on this). // // To sum up, we end up transforming // // ta... = <early args> // tb... = <late args> // tc = callTarget // GT_CALL tc, ta..., tb... // // into // // ta... = <early args> (without GT_PUTARG_* nodes) // tb = callTarget // GT_CALL CORINFO_HELP_VALIDATE_INDIRECT_CALL, tb // tc = GT_PHYSREG REG_VALIDATE_INDIRECT_CALL_ADDR (preserved by helper) // td = <moved GT_PUTARG_* nodes> // GT_CALL tb, ta..., td.. 
// GenTree* regNode = PhysReg(REG_VALIDATE_INDIRECT_CALL_ADDR, TYP_I_IMPL); LIR::Use useOfTar; bool gotUse = BlockRange().TryGetUse(callTarget, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(regNode); GenTree* targetPlaceholder = comp->gtNewZeroConNode(callTarget->TypeGet()); // Add the call to the validator. Use a placeholder for the target while we // morph, sequence and lower, to avoid redoing that for the actual target. GenTreeCall::Use* args = comp->gtNewCallArgs(targetPlaceholder); GenTreeCall* validate = comp->gtNewHelperCallNode(CORINFO_HELP_VALIDATE_INDIRECT_CALL, TYP_VOID, args); comp->fgMorphTree(validate); LIR::Range validateRange = LIR::SeqTree(comp, validate); GenTree* validateFirst = validateRange.FirstNode(); GenTree* validateLast = validateRange.LastNode(); // Insert the validator with the call target before the late args. BlockRange().InsertBefore(call, std::move(validateRange)); // Swap out the target gotUse = BlockRange().TryGetUse(targetPlaceholder, &useOfTar); assert(gotUse); useOfTar.ReplaceWith(callTarget); targetPlaceholder->SetUnusedValue(); LowerRange(validateFirst, validateLast); // Insert the PHYSREG node that we must load right after validation. BlockRange().InsertAfter(validate, regNode); LowerNode(regNode); // Finally move all GT_PUTARG_* nodes for (GenTreeCall::Use& use : call->Args()) { GenTree* node = use.GetNode(); if (!node->IsValue()) { // Non-value nodes in early args are setup nodes for late args. continue; } assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } for (GenTreeCall::Use& use : call->LateArgs()) { GenTree* node = use.GetNode(); assert(node->OperIsPutArg() || node->OperIsFieldList()); MoveCFGCallArg(call, node); } break; } case CFGCallKind::Dispatch: { #ifdef REG_DISPATCH_INDIRECT_CALL_ADDR // Now insert the call target as an extra argument. // // First append the early placeholder arg GenTreeCall::Use** earlySlot = &call->gtCallArgs; unsigned int index = call->gtCallThisArg != nullptr ? 
1 : 0; while (*earlySlot != nullptr) { earlySlot = &(*earlySlot)->NextRef(); index++; } assert(index == call->fgArgInfo->ArgCount()); GenTree* placeHolder = comp->gtNewArgPlaceHolderNode(callTarget->TypeGet(), NO_CLASS_HANDLE); placeHolder->gtFlags |= GTF_LATE_ARG; *earlySlot = comp->gtNewCallArgs(placeHolder); // Append the late actual arg GenTreeCall::Use** lateSlot = &call->gtCallLateArgs; unsigned int lateIndex = 0; while (*lateSlot != nullptr) { lateSlot = &(*lateSlot)->NextRef(); lateIndex++; } *lateSlot = comp->gtNewCallArgs(callTarget); // Add an entry into the arg info regNumber regNum = REG_DISPATCH_INDIRECT_CALL_ADDR; unsigned numRegs = 1; unsigned byteSize = TARGET_POINTER_SIZE; unsigned byteAlignment = TARGET_POINTER_SIZE; bool isStruct = false; bool isFloatHfa = false; bool isVararg = false; fgArgTabEntry* entry = call->fgArgInfo->AddRegArg(index, placeHolder, *earlySlot, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); entry->lateUse = *lateSlot; entry->SetLateArgInx(lateIndex); // Lower the newly added args now that call is updated LowerArg(call, &(*earlySlot)->NodeRef()); LowerArg(call, &(*lateSlot)->NodeRef()); // Finally update the call to be a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_DISPATCH_INDIRECT_CALL); call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif // Now relower the call target call->gtControlExpr = LowerDirectCall(call); if (call->gtControlExpr != nullptr) { LIR::Range dispatchControlExprRange = LIR::SeqTree(comp, call->gtControlExpr); ContainCheckRange(dispatchControlExprRange); BlockRange().InsertBefore(call, std::move(dispatchControlExprRange)); } #else assert(!"Unexpected CFGCallKind::Dispatch for platform without dispatcher"); #endif break; } default: unreached(); } } //------------------------------------------------------------------------ // IsInvariantInRange: Check if a node is invariant in the specified range. In // other words, can 'node' be moved to right before 'endExclusive' without its // computation changing values? // // Arguments: // node - The node. // endExclusive - The exclusive end of the range to check invariance for. // bool Lowering::IsInvariantInRange(GenTree* node, GenTree* endExclusive) { assert(node->Precedes(endExclusive)); if (node->IsInvariant()) { return true; } if (!node->IsValue()) { return false; } if (node->OperIsLocal()) { GenTreeLclVarCommon* lcl = node->AsLclVarCommon(); LclVarDsc* desc = comp->lvaGetDesc(lcl); if (desc->IsAddressExposed()) { return false; } // Currently, non-address exposed locals have the property that their // use occurs at the user, so no further interference check is // necessary. return true; } return false; } //------------------------------------------------------------------------ // MoveCFGCallArg: Given a call that will be CFG transformed using the // validate+call scheme, and an argument GT_PUTARG_* or GT_FIELD_LIST node, // move that node right before the call. // // Arguments: // call - The call that is being CFG transformed // node - The argument node // // Remarks: // We can always move the GT_PUTARG_* node further ahead as the side-effects // of these nodes are handled by LSRA. 
However, the operands of these nodes // are not always safe to move further ahead; for invariant operands, we // move them ahead as well to shorten the lifetime of these values. // void Lowering::MoveCFGCallArg(GenTreeCall* call, GenTree* node) { assert(node->OperIsPutArg() || node->OperIsFieldList()); if (node->OperIsFieldList()) { JITDUMP("Node is a GT_FIELD_LIST; moving all operands\n"); for (GenTreeFieldList::Use& operand : node->AsFieldList()->Uses()) { assert(operand.GetNode()->OperIsPutArg()); MoveCFGCallArg(call, operand.GetNode()); } } else { GenTree* operand = node->AsOp()->gtGetOp1(); JITDUMP("Checking if we can move operand of GT_PUTARG_* node:\n"); DISPTREE(operand); if (((operand->gtFlags & GTF_ALL_EFFECT) == 0) && IsInvariantInRange(operand, call)) { JITDUMP("...yes, moving to after validator call\n"); BlockRange().Remove(operand); BlockRange().InsertBefore(call, operand); } else { JITDUMP("...no, operand has side effects or is not invariant\n"); } } JITDUMP("Moving\n"); DISPTREE(node); JITDUMP("\n"); BlockRange().Remove(node); BlockRange().InsertBefore(call, node); } #ifndef TARGET_64BIT //------------------------------------------------------------------------ // Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node. // // Arguments: // cmp - the compare node // // Return Value: // The next node to lower. // // Notes: // This is done during lowering because DecomposeLongs handles only nodes // that produce TYP_LONG values. Compare nodes may consume TYP_LONG values // but produce TYP_INT values. // GenTree* Lowering::DecomposeLongCompare(GenTree* cmp) { assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG); GenTree* src1 = cmp->gtGetOp1(); GenTree* src2 = cmp->gtGetOp2(); assert(src1->OperIs(GT_LONG)); assert(src2->OperIs(GT_LONG)); GenTree* loSrc1 = src1->gtGetOp1(); GenTree* hiSrc1 = src1->gtGetOp2(); GenTree* loSrc2 = src2->gtGetOp1(); GenTree* hiSrc2 = src2->gtGetOp2(); BlockRange().Remove(src1); BlockRange().Remove(src2); genTreeOps condition = cmp->OperGet(); GenTree* loCmp; GenTree* hiCmp; if (cmp->OperIs(GT_EQ, GT_NE)) { // // Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can // be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we // don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction. // // XOR is used rather than SUB because it is commutative and thus allows swapping the operands when // the first happens to be a constant. Usually only the second compare operand is a constant but it's // still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast // then hiSrc1 would be 0. 
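// For example (illustrative): "(long)x == 0" on a 32-bit target becomes t = OR(x.lo, x.hi), which sets // the condition flags, followed by SETCC|JCC EQ; no explicit compare against 0 is emitted.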
// if (loSrc1->OperIs(GT_CNS_INT)) { std::swap(loSrc1, loSrc2); } if (loSrc2->IsIntegralConst(0)) { BlockRange().Remove(loSrc2); loCmp = loSrc1; } else { loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2); BlockRange().InsertBefore(cmp, loCmp); ContainCheckBinary(loCmp->AsOp()); } if (hiSrc1->OperIs(GT_CNS_INT)) { std::swap(hiSrc1, hiSrc2); } if (hiSrc2->IsIntegralConst(0)) { BlockRange().Remove(hiSrc2); hiCmp = hiSrc1; } else { hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckBinary(hiCmp->AsOp()); } hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckBinary(hiCmp->AsOp()); } else { assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT)); // // If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0). // If the compare is unsigned we can still use SUB but we need to check the Carry flag, // not the actual result. In both cases we can simply check the appropriate condition flags // and ignore the actual result: // SUB_LO loSrc1, loSrc2 // SUB_HI hiSrc1, hiSrc2 // SETCC|JCC (signed|unsigned LT|GE) // If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can // be turned into a CMP because the first SUB would have set carry to 0. This effectively // transforms a long compare against 0 into an int compare of the high part against 0. // // (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value // is greater than 0 is not so easy. We need to turn this into a positive/negative check // like the one we get for LT|GE compares; this can be achieved by swapping the compare: // (x LE|GT y) becomes (y GE|LT x) // // Having to swap operands is problematic when the second operand is a constant. The constant // moves to the first operand where it cannot be contained and thus needs a register. This can // be avoided by changing the constant such that LE|GT becomes LT|GE: // (x LE|GT 41) becomes (x LT|GE 42) // if (cmp->OperIs(GT_LE, GT_GT)) { bool mustSwap = true; if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT)) { uint32_t loValue = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue()); uint32_t hiValue = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue()); uint64_t value = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32); uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX; if (value != maxValue) { value++; loValue = value & UINT32_MAX; hiValue = (value >> 32) & UINT32_MAX; loSrc2->AsIntCon()->SetIconValue(loValue); hiSrc2->AsIntCon()->SetIconValue(hiValue); condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE; mustSwap = false; } } if (mustSwap) { std::swap(loSrc1, loSrc2); std::swap(hiSrc1, hiSrc2); condition = GenTree::SwapRelop(condition); } } assert((condition == GT_LT) || (condition == GT_GE)); if (loSrc2->IsIntegralConst(0)) { BlockRange().Remove(loSrc2); // Very conservative dead code removal... but it helps.
if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(loSrc1); } else { loSrc1->SetUnusedValue(); } hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, hiCmp); ContainCheckCompare(hiCmp->AsOp()); } else { loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2); hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2); BlockRange().InsertBefore(cmp, loCmp, hiCmp); ContainCheckCompare(loCmp->AsOp()); ContainCheckBinary(hiCmp->AsOp()); // // Try to move the first SUB_HI operands right in front of it, this allows using // a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do // this only for locals as they won't change condition flags. Note that we could // move constants (except 0 which generates XOR reg, reg) but it's extremely rare // to have a constant as the first operand. // if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD)) { BlockRange().Remove(hiSrc1); BlockRange().InsertBefore(hiCmp, hiSrc1); } } } hiCmp->gtFlags |= GTF_SET_FLAGS; if (hiCmp->IsValue()) { hiCmp->SetUnusedValue(); } LIR::Use cmpUse; if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE)) { BlockRange().Remove(cmp); GenTree* jcc = cmpUse.User(); jcc->AsOp()->gtOp1 = nullptr; jcc->ChangeOper(GT_JCC); jcc->gtFlags |= GTF_USE_FLAGS; jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } else { cmp->AsOp()->gtOp1 = nullptr; cmp->AsOp()->gtOp2 = nullptr; cmp->ChangeOper(GT_SETCC); cmp->gtFlags |= GTF_USE_FLAGS; cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned()); } return cmp->gtNext; } #endif // !TARGET_64BIT //------------------------------------------------------------------------ // Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations. // // Arguments: // cmp - the compare node // // Return Value: // The original compare node if lowering should proceed as usual or the next node // to lower if the compare node was changed in such a way that lowering is no // longer needed. // // Notes: // - Narrow operands to enable memory operand containment (XARCH specific). // - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could // be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added). // - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific) // - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the // condition flags appropriately (XARCH/ARM64 specific but could be extended // to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS). // GenTree* Lowering::OptimizeConstCompare(GenTree* cmp) { assert(cmp->gtGetOp2()->IsIntegralConst()); #if defined(TARGET_XARCH) || defined(TARGET_ARM64) GenTree* op1 = cmp->gtGetOp1(); GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon(); ssize_t op2Value = op2->IconValue(); #ifdef TARGET_XARCH var_types op1Type = op1->TypeGet(); if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && FitsIn(op1Type, op2Value)) { // // If op1's type is small then try to narrow op2 so it has the same type as op1. // Small types are usually used by memory loads and if both compare operands have // the same type then the memory load can be contained. In certain situations // (e.g "cmp ubyte, 200") we also get a smaller instruction encoding. 
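// For example (an illustrative sketch): with a TYP_UBYTE memory operand this enables "cmp byte ptr // [mem], 200" rather than a widening load plus a 32-bit compare ("movzx reg, byte ptr [mem]" followed by // "cmp reg, 200").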
// op2->gtType = op1Type; } else #endif if (op1->OperIs(GT_CAST) && !op1->gtOverflow()) { GenTreeCast* cast = op1->AsCast(); var_types castToType = cast->CastToType(); GenTree* castOp = cast->gtGetOp1(); if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value)) { // // Since we're going to remove the cast we need to be able to narrow the cast operand // to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR). // Some opers just can't be narrowed (e.g DIV, MUL) while others could be narrowed but // doing so would produce incorrect results (e.g. RSZ, RSH). // // The below list of handled opers is conservative but enough to handle the most common // situations. In particular this includes CALL; sometimes the JIT unnecessarily widens // the result of bool returning calls. // bool removeCast = #ifdef TARGET_ARM64 (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) && #endif (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIs(GT_OR, GT_XOR, GT_AND) #ifdef TARGET_XARCH || IsContainableMemoryOp(castOp) #endif ); if (removeCast) { assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation #ifdef TARGET_ARM64 bool cmpEq = cmp->OperIs(GT_EQ); cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE); op2->SetIconValue(0xff); op2->gtType = castOp->gtType; #else castOp->gtType = castToType; op2->gtType = castToType; #endif // If we have any contained memory ops on castOp, they must now not be contained. if (castOp->OperIs(GT_OR, GT_XOR, GT_AND)) { GenTree* op1 = castOp->gtGetOp1(); if ((op1 != nullptr) && !op1->IsCnsIntOrI()) { op1->ClearContained(); } GenTree* op2 = castOp->gtGetOp2(); if ((op2 != nullptr) && !op2->IsCnsIntOrI()) { op2->ClearContained(); } } cmp->AsOp()->gtOp1 = castOp; BlockRange().Remove(cast); } } } else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE)) { // // Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible. // GenTree* andOp1 = op1->gtGetOp1(); GenTree* andOp2 = op1->gtGetOp2(); if (op2Value != 0) { // // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask) // into ((x AND mask) NE|EQ 0) when mask is a single bit. // if (isPow2<target_size_t>(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value)) { op2Value = 0; op2->SetIconValue(0); cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet())); } } if (op2Value == 0) { BlockRange().Remove(op1); BlockRange().Remove(op2); cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE); cmp->AsOp()->gtOp1 = andOp1; cmp->AsOp()->gtOp2 = andOp2; // We will re-evaluate containment below andOp1->ClearContained(); andOp2->ClearContained(); #ifdef TARGET_XARCH if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst()) { // // For "test" we only care about the bits that are set in the second operand (mask). // If the mask fits in a small type then we can narrow both operands to generate a "test" // instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid // a widening load in some cases. // // For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches // the behavior of a previous implementation and avoids adding more cases where we generate // 16 bit instructions that require a length changing prefix (0x66). These suffer from // significant decoder stalls on Intel CPUs. // // We could also do this for 64 bit masks that fit into 32 bit but it doesn't help.
// In such cases morph narrows down the existing GT_AND by inserting a cast between it and // the memory operand so we'd need to add more code to recognize and eliminate that cast. // size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue()); if (FitsIn<UINT8>(mask)) { andOp1->gtType = TYP_UBYTE; andOp2->gtType = TYP_UBYTE; } else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2) { andOp1->gtType = TYP_USHORT; andOp2->gtType = TYP_USHORT; } } #endif } } if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE)) { #ifdef TARGET_XARCH // // Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT // results in smaller and faster code. It also doesn't have special register // requirements, unlike LSH that requires the shift count to be in ECX. // Note that BT has the same behavior as LSH when the bit index exceeds the // operand bit size - it uses (bit_index MOD bit_size). // GenTree* lsh = cmp->gtGetOp2(); LIR::Use cmpUse; if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) && BlockRange().TryGetUse(cmp, &cmpUse)) { GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC; cmp->SetOper(GT_BT); cmp->gtType = TYP_VOID; cmp->gtFlags |= GTF_SET_FLAGS; cmp->AsOp()->gtOp2 = lsh->gtGetOp2(); cmp->gtGetOp2()->ClearContained(); BlockRange().Remove(lsh->gtGetOp1()); BlockRange().Remove(lsh); GenTreeCC* cc; if (cmpUse.User()->OperIs(GT_JTRUE)) { cmpUse.User()->ChangeOper(GT_JCC); cc = cmpUse.User()->AsCC(); cc->gtCondition = condition; } else { cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT); BlockRange().InsertAfter(cmp, cc); cmpUse.ReplaceWith(cc); } cc->gtFlags |= GTF_USE_FLAGS; return cmp->gtNext; } #endif // TARGET_XARCH } else if (cmp->OperIs(GT_EQ, GT_NE)) { GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // TODO-CQ: right now the below peep is inexpensive and gets the benefit in most // cases because in the majority of cases op1, op2 and cmp would be in that order in // execution. In general we should be able to check that all the nodes that come // after op1 do not modify the flags so that it is safe to avoid generating a // test instruction. if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) && #ifdef TARGET_XARCH (op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG) #ifdef FEATURE_HW_INTRINSICS || (op1->OperIs(GT_HWINTRINSIC) && emitter::DoesWriteZeroFlag(HWIntrinsicInfo::lookupIns(op1->AsHWIntrinsic()))) #endif // FEATURE_HW_INTRINSICS ) #else // TARGET_ARM64 op1->OperIs(GT_AND, GT_ADD, GT_SUB) #endif ) { op1->gtFlags |= GTF_SET_FLAGS; op1->SetUnusedValue(); BlockRange().Remove(op2); GenTree* next = cmp->gtNext; GenTree* cc; genTreeOps ccOp; LIR::Use cmpUse; // Fast check for the common case - relop used by a JTRUE that immediately follows it. if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp)) { cc = next; ccOp = GT_JCC; next = nullptr; BlockRange().Remove(cmp); } else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE)) { cc = cmpUse.User(); ccOp = GT_JCC; next = nullptr; BlockRange().Remove(cmp); } else // The relop is not used by a JTRUE or it is not used at all. { // Transform the relop node into a SETCC. If it's not used we could remove // it completely but that means doing more work to handle a rare case.
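// For example (illustrative): "bool b = (x - y) != 0" keeps the SUB, marks it GTF_SET_FLAGS, and // materializes b with SETCC NE instead of emitting a separate compare against zero.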
cc = cmp; ccOp = GT_SETCC; } GenCondition condition = GenCondition::FromIntegralRelop(cmp); cc->ChangeOper(ccOp); cc->AsCC()->gtCondition = condition; cc->gtFlags |= GTF_USE_FLAGS; return next; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) return cmp; } //------------------------------------------------------------------------ // Lowering::LowerCompare: Lowers a compare node. // // Arguments: // cmp - the compare node // // Return Value: // The next node to lower. // GenTree* Lowering::LowerCompare(GenTree* cmp) { #ifndef TARGET_64BIT if (cmp->gtGetOp1()->TypeGet() == TYP_LONG) { return DecomposeLongCompare(cmp); } #endif if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts()) { GenTree* next = OptimizeConstCompare(cmp); // If OptimizeConstCompare returns the compare node as "next" then we need to continue lowering. if (next != cmp) { return next; } } #ifdef TARGET_XARCH if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet()) { if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet())) { // // If both operands have the same type then codegen will use the common operand type to // determine the instruction type. For small types this would result in performing a // signed comparison of two small unsigned values without zero extending them to TYP_INT // which is incorrect. Note that making the comparison unsigned doesn't imply that codegen // has to generate a small comparison; it can still correctly generate a TYP_INT comparison. // cmp->gtFlags |= GTF_UNSIGNED; } } #endif // TARGET_XARCH ContainCheckCompare(cmp->AsOp()); return cmp->gtNext; } //------------------------------------------------------------------------ // Lowering::LowerJTrue: Lowers a JTRUE node. // // Arguments: // jtrue - the JTRUE node // // Return Value: // The next node to lower (usually nullptr). // // Notes: // On ARM64 this may remove the JTRUE node and transform its associated // relop into a JCMP node. // GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue) { #ifdef TARGET_ARM64 GenTree* relop = jtrue->gtGetOp1(); GenTree* relopOp2 = relop->AsOp()->gtGetOp2(); if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI()) { bool useJCMP = false; GenTreeFlags flags = GTF_EMPTY; if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0)) { // Codegen will use cbz or cbnz, which do not affect the flag register flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY; useJCMP = true; } else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue())) { // Codegen will use tbz or tbnz, which do not affect the flag register flags = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : GTF_EMPTY); useJCMP = true; } if (useJCMP) { relop->SetOper(GT_JCMP); relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ); relop->gtFlags |= flags; relop->gtType = TYP_VOID; relopOp2->SetContained(); BlockRange().Remove(jtrue); assert(relop->gtNext == nullptr); return nullptr; } } #endif // TARGET_ARM64 ContainCheckJTrue(jtrue); assert(jtrue->gtNext == nullptr); return nullptr; } //---------------------------------------------------------------------------------------------- // LowerNodeCC: Lowers a node that produces a boolean value by setting the condition flags. // // Arguments: // node - The node to lower // condition - The condition code of the generated SETCC/JCC node // // Return Value: // A SETCC/JCC node or nullptr if `node` is not used.
// // Notes: // This simply replaces `node`'s use with an appropriate SETCC/JCC node; // `node` is not actually changed, except by having its GTF_SET_FLAGS set. // It's the caller's responsibility to change `node` such that it only // sets the condition flags, without producing a boolean value. // GenTreeCC* Lowering::LowerNodeCC(GenTree* node, GenCondition condition) { // Skip over a chain of EQ/NE(x, 0) relops. This may be present either // because `node` is not a relop and so it cannot be used directly by a // JTRUE, or because the frontend failed to remove an EQ/NE(x, 0) that's // used as logical negation. // // Usually there's only one such relop but there's little difference // between removing one or all so we may as well remove them all. // // We can't allow any other nodes between `node` and its user because we // have no way of knowing if those nodes change flags or not. So we're looking // to skip over a sequence of appropriately connected zero and EQ/NE nodes. // The x in EQ/NE(x, 0) GenTree* relop = node; // The first node of the relop sequence GenTree* first = node->gtNext; // The node following the relop sequence GenTree* next = first; while ((next != nullptr) && next->IsIntegralConst(0) && (next->gtNext != nullptr) && next->gtNext->OperIs(GT_EQ, GT_NE) && (next->gtNext->AsOp()->gtGetOp1() == relop) && (next->gtNext->AsOp()->gtGetOp2() == next)) { relop = next->gtNext; next = relop->gtNext; if (relop->OperIs(GT_EQ)) { condition = GenCondition::Reverse(condition); } } GenTreeCC* cc = nullptr; // Next may be null if `node` is not used. In that case we don't need to generate a SETCC node. if (next != nullptr) { if (next->OperIs(GT_JTRUE)) { // If the instruction immediately following 'relop', i.e. 'next' is a conditional branch, // it should always have 'relop' as its 'op1'. If it doesn't, then we have improperly // constructed IL (the setting of a condition code should always immediately precede its // use, since the JIT doesn't track dataflow for condition codes). Still, if it happens // it's not our problem, it simply means that `node` is not used and can be removed. if (next->AsUnOp()->gtGetOp1() == relop) { assert(relop->OperIsCompare()); next->ChangeOper(GT_JCC); cc = next->AsCC(); cc->gtCondition = condition; } } else { // If the node is used by something other than a JTRUE then we need to insert a // SETCC node to materialize the boolean value. LIR::Use use; if (BlockRange().TryGetUse(relop, &use)) { cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT); BlockRange().InsertAfter(node, cc); use.ReplaceWith(cc); } } } if (cc != nullptr) { node->gtFlags |= GTF_SET_FLAGS; cc->gtFlags |= GTF_USE_FLAGS; } // Remove the chain of EQ/NE(x, 0) relop nodes, if any. Note that if a SETCC was // inserted after `node`, `first` still points to the node that was initially // after `node`. if (relop != node) { BlockRange().Remove(first, relop); } return cc; } // Lower "jmp <method>" tail call to insert PInvoke method epilog if required. void Lowering::LowerJmpMethod(GenTree* jmp) { assert(jmp->OperGet() == GT_JMP); JITDUMP("lowering GT_JMP\n"); DISPNODE(jmp); JITDUMP("============"); // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that // a method returns. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp)); } } // Lower GT_RETURN node to insert PInvoke method epilog if required.
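// For example (illustrative): when the declared return type uses a floating-point register but the // computed return value is integer-typed, lowering rewrites the tree as RETURN(BITCAST<float>(t)) so // the value reaches the correct return register.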
void Lowering::LowerRet(GenTreeUnOp* ret) { assert(ret->OperGet() == GT_RETURN); JITDUMP("lowering GT_RETURN\n"); DISPNODE(ret); JITDUMP("============"); GenTree* retVal = ret->gtGetOp1(); // There are two kinds of retyping: // - A simple bitcast can be inserted when: // - We're returning a floating type as an integral type or vice-versa, or // - If we're returning a struct as a primitive type, we change the type of // 'retval' in 'LowerRetStructLclVar()' bool needBitcast = (ret->TypeGet() != TYP_VOID) && (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1())); bool doPrimitiveBitcast = false; if (needBitcast) { doPrimitiveBitcast = (!varTypeIsStruct(ret) && !varTypeIsStruct(retVal)); } if (doPrimitiveBitcast) { // Add a simple bitcast when both types are not structs. // If one type is a struct it will be handled below. #if defined(DEBUG) assert(!varTypeIsStruct(ret) && !varTypeIsStruct(retVal)); #endif GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } else if (ret->TypeGet() != TYP_VOID) { #if FEATURE_MULTIREG_RET if (retVal->OperIs(GT_LCL_VAR) && varTypeIsStruct(retVal)) { ReturnTypeDesc retTypeDesc; LclVarDsc* varDsc = nullptr; varDsc = comp->lvaGetDesc(retVal->AsLclVar()); retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd(), comp->info.compCallConv); if (retTypeDesc.GetReturnRegCount() > 1) { CheckMultiRegLclVar(retVal->AsLclVar(), &retTypeDesc); } } #endif // FEATURE_MULTIREG_RET #ifdef DEBUG if (varTypeIsStruct(ret->TypeGet()) != varTypeIsStruct(retVal->TypeGet())) { if (varTypeIsStruct(ret->TypeGet())) { assert(comp->info.compRetNativeType != TYP_STRUCT); var_types retActualType = genActualType(comp->info.compRetNativeType); var_types retValActualType = genActualType(retVal->TypeGet()); bool constStructInit = retVal->IsConstInitVal(); bool implicitCastFromSameOrBiggerSize = (genTypeSize(retActualType) <= genTypeSize(retValActualType)); // This could happen if we have retyped op1 as a primitive type during struct promotion, // check `retypedFieldsMap` for details. bool actualTypesMatch = (retActualType == retValActualType); assert(actualTypesMatch || constStructInit || implicitCastFromSameOrBiggerSize); } } #endif // DEBUG if (varTypeIsStruct(ret)) { LowerRetStruct(ret); } else if (!ret->TypeIs(TYP_VOID) && varTypeIsStruct(retVal)) { // Return struct as a primitive using Unsafe cast. assert(retVal->OperIs(GT_LCL_VAR)); LowerRetSingleRegStructLclVar(ret); } } // A method doing PInvokes has exactly one return block unless it has tail calls. if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB)) { InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret)); } ContainCheckRet(ret); } //---------------------------------------------------------------------------------------------- // LowerStoreLocCommon: platform-independent part of local var or field store lowering. // // Arguments: // lclStore - The store lcl node to lower.
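// // Notes: // For example (an illustrative sketch): a TYP_STRUCT store whose source is neither a call, a // constant, nor another local is rewritten below as STORE_OBJ(LCL_VAR_ADDR<V01>, src), with V01 a // made-up local number, and the local is marked DoNotEnregister(BlockOp).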
// void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore) { assert(lclStore->OperIs(GT_STORE_LCL_FLD, GT_STORE_LCL_VAR)); JITDUMP("lowering store lcl var/field (before):\n"); DISPTREERANGE(BlockRange(), lclStore); JITDUMP("\n"); GenTree* src = lclStore->gtGetOp1(); LclVarDsc* varDsc = comp->lvaGetDesc(lclStore); const bool srcIsMultiReg = src->IsMultiRegNode(); const bool dstIsMultiReg = lclStore->IsMultiRegLclVar(); if (!dstIsMultiReg && varTypeIsStruct(varDsc)) { // TODO-Cleanup: we want to check `varDsc->lvRegStruct` as the last condition instead of `!varDsc->lvPromoted`, // but we do not set it for `CSE` vars so it is currently failing. assert(varDsc->CanBeReplacedWithItsField(comp) || varDsc->lvDoNotEnregister || !varDsc->lvPromoted); if (varDsc->CanBeReplacedWithItsField(comp)) { assert(varDsc->lvFieldCnt == 1); unsigned fldNum = varDsc->lvFieldLclStart; LclVarDsc* fldDsc = comp->lvaGetDesc(fldNum); JITDUMP("Replacing an independently promoted local var V%02u with its only field V%02u for the store " "from a call [%06u]\n", lclStore->GetLclNum(), fldNum, comp->dspTreeID(lclStore)); lclStore->SetLclNum(fldNum); lclStore->ChangeType(fldDsc->TypeGet()); varDsc = fldDsc; } } if (srcIsMultiReg || dstIsMultiReg) { const ReturnTypeDesc* retTypeDesc = nullptr; if (src->OperIs(GT_CALL)) { retTypeDesc = src->AsCall()->GetReturnTypeDesc(); } CheckMultiRegLclVar(lclStore->AsLclVar(), retTypeDesc); } const var_types lclRegType = varDsc->GetRegisterType(lclStore); if ((lclStore->TypeGet() == TYP_STRUCT) && !srcIsMultiReg) { bool convertToStoreObj; if (src->OperGet() == GT_CALL) { GenTreeCall* call = src->AsCall(); const ClassLayout* layout = varDsc->GetLayout(); #ifdef DEBUG const unsigned slotCount = layout->GetSlotCount(); #if defined(TARGET_XARCH) && !defined(UNIX_AMD64_ABI) // Windows x64 doesn't have multireg returns, // x86 uses it only for long return type, not for structs. assert(slotCount == 1); assert(lclRegType != TYP_UNDEF); #else // !TARGET_XARCH || UNIX_AMD64_ABI if (!varDsc->lvIsHfa()) { if (slotCount > 1) { assert(call->HasMultiRegRetVal()); } else { unsigned size = layout->GetSize(); assert((size <= 8) || (size == 16)); bool isPowerOf2 = (((size - 1) & size) == 0); bool isTypeDefined = (lclRegType != TYP_UNDEF); assert(isPowerOf2 == isTypeDefined); } } #endif // !TARGET_XARCH || UNIX_AMD64_ABI #endif // DEBUG #if !defined(WINDOWS_AMD64_ABI) if (!call->HasMultiRegRetVal() && (lclRegType == TYP_UNDEF)) { // If we have a single return register, // but we can't retype it as a primitive type, we must spill it. GenTreeLclVar* spilledCall = SpillStructCallResult(call); lclStore->gtOp1 = spilledCall; src = lclStore->gtOp1; JITDUMP("lowering store lcl var/field has to spill call src.\n"); LowerStoreLocCommon(lclStore); return; } #endif // !WINDOWS_AMD64_ABI convertToStoreObj = false; } else if (!varDsc->IsEnregisterableType()) { convertToStoreObj = true; } else if (src->OperIs(GT_CNS_INT)) { assert(src->IsIntegralConst(0) && "expected an INIT_VAL for non-zero init."); #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclRegType)) { CorInfoType simdBaseJitType = comp->getBaseJitTypeOfSIMDLocal(lclStore); if (simdBaseJitType == CORINFO_TYPE_UNDEF) { // Lie about the type if we don't know/have it. 
simdBaseJitType = CORINFO_TYPE_FLOAT; } GenTreeSIMD* simdTree = comp->gtNewSIMDNode(lclRegType, src, SIMDIntrinsicInit, simdBaseJitType, varDsc->lvExactSize); BlockRange().InsertAfter(src, simdTree); LowerSIMD(simdTree); src = simdTree; lclStore->gtOp1 = src; convertToStoreObj = false; } else #endif // FEATURE_SIMD { convertToStoreObj = false; } } else if (!src->OperIs(GT_LCL_VAR)) { convertToStoreObj = true; } else { assert(src->OperIs(GT_LCL_VAR)); convertToStoreObj = false; } if (convertToStoreObj) { const unsigned lclNum = lclStore->GetLclNum(); GenTreeLclVar* addr = comp->gtNewLclVarAddrNode(lclNum, TYP_BYREF); comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOp)); addr->gtFlags |= GTF_VAR_DEF; assert(!addr->IsPartialLclFld(comp)); addr->gtFlags |= GTF_DONT_CSE; // Create the assignment node. lclStore->ChangeOper(GT_STORE_OBJ); GenTreeBlk* objStore = lclStore->AsObj(); // Only the GTF_LATE_ARG flag (if present) is preserved. objStore->gtFlags &= GTF_LATE_ARG; objStore->gtFlags |= GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP; #ifndef JIT32_GCENCODER objStore->gtBlkOpGcUnsafe = false; #endif objStore->gtBlkOpKind = GenTreeObj::BlkOpKindInvalid; objStore->SetLayout(varDsc->GetLayout()); objStore->SetAddr(addr); objStore->SetData(src); BlockRange().InsertBefore(objStore, addr); LowerBlockStoreCommon(objStore); return; } } // src and dst can be in registers, check if we need a bitcast. if (!src->TypeIs(TYP_STRUCT) && (varTypeUsesFloatReg(lclRegType) != varTypeUsesFloatReg(src))) { assert(!srcIsMultiReg && !dstIsMultiReg); assert(lclStore->OperIsLocalStore()); assert(lclRegType != TYP_UNDEF); GenTree* bitcast = comp->gtNewBitCastNode(lclRegType, src); lclStore->gtOp1 = bitcast; src = lclStore->gtGetOp1(); BlockRange().InsertBefore(lclStore, bitcast); ContainCheckBitCast(bitcast); } LowerStoreLoc(lclStore); JITDUMP("lowering store lcl var/field (after):\n"); DISPTREERANGE(BlockRange(), lclStore); JITDUMP("\n"); } //---------------------------------------------------------------------------------------------- // LowerRetStructLclVar: Lowers a struct return node. // // Arguments: // node - The return node to lower. // void Lowering::LowerRetStruct(GenTreeUnOp* ret) { #ifdef TARGET_ARM64 if (GlobalJitOptions::compFeatureHfa) { if (varTypeIsSIMD(ret)) { if (comp->info.compRetNativeType == TYP_STRUCT) { assert(varTypeIsSIMD(ret->gtGetOp1())); assert(comp->compMethodReturnsMultiRegRegTypeAlternate()); ret->ChangeType(comp->info.compRetNativeType); } else { assert(comp->info.compRetNativeType == ret->TypeGet()); GenTree* retVal = ret->gtGetOp1(); if (retVal->TypeGet() != ret->TypeGet()) { assert(retVal->OperIs(GT_LCL_VAR)); LowerRetSingleRegStructLclVar(ret); } return; } } } #endif // TARGET_ARM64 if (comp->compMethodReturnsMultiRegRegTypeAlternate()) { return; } assert(ret->OperIs(GT_RETURN)); assert(varTypeIsStruct(ret)); GenTree* retVal = ret->gtGetOp1(); // Note: small types are returned as INT. var_types nativeReturnType = genActualType(comp->info.compRetNativeType); ret->ChangeType(nativeReturnType); switch (retVal->OperGet()) { case GT_CALL: assert(retVal->TypeIs(nativeReturnType)); // Type should be changed during call processing. break; case GT_CNS_INT: // When we promote LCL_VAR single fields into return // we could have all type of constans here. if (varTypeUsesFloatReg(nativeReturnType)) { // Do not expect `initblock` for SIMD* types, // only 'initobj'. 
assert(retVal->AsIntCon()->IconValue() == 0); retVal->BashToConst(0.0, TYP_FLOAT); } break; case GT_OBJ: retVal->ChangeOper(GT_IND); FALLTHROUGH; case GT_IND: retVal->ChangeType(nativeReturnType); LowerIndir(retVal->AsIndir()); break; case GT_LCL_VAR: LowerRetSingleRegStructLclVar(ret); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #ifdef FEATURE_SIMD case GT_SIMD: #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: #endif // FEATURE_HW_INTRINSICS { assert(!retVal->TypeIs(TYP_STRUCT)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } } break; #endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS case GT_LCL_FLD: { #ifdef DEBUG LclVarDsc* varDsc = comp->lvaGetDesc(retVal->AsLclFld()); assert(varDsc->lvDoNotEnregister); #endif retVal->ChangeType(nativeReturnType); } break; default: assert(varTypeIsEnregisterable(retVal)); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } break; } } //---------------------------------------------------------------------------------------------- // LowerRetSingleRegStructLclVar: Lowers a return node with a struct lclVar as a source. // // Arguments: // node - The return node to lower. // // Notes: // - the function is only for LclVars that are returned in one register; // - if LclVar is allocated in memory then read it as return type; // - if LclVar can be enregistered read it as register type and add a bitcast if necessary; // void Lowering::LowerRetSingleRegStructLclVar(GenTreeUnOp* ret) { assert(!comp->compMethodReturnsMultiRegRegTypeAlternate()); assert(ret->OperIs(GT_RETURN)); GenTreeLclVarCommon* lclVar = ret->gtGetOp1()->AsLclVar(); assert(lclVar->OperIs(GT_LCL_VAR)); unsigned lclNum = lclVar->GetLclNum(); LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); if (varDsc->lvPromoted) { // TODO-1stClassStructs: We can no longer independently promote // or enregister this struct, since it is referenced as a whole. comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } if (varDsc->lvDoNotEnregister) { lclVar->ChangeOper(GT_LCL_FLD); lclVar->AsLclFld()->SetLclOffs(0); // We are returning as a primitive type and the lcl is of struct type. assert(comp->info.compRetNativeType != TYP_STRUCT); assert((genTypeSize(comp->info.compRetNativeType) == genTypeSize(ret)) || (varTypeIsIntegral(ret) && varTypeIsIntegral(comp->info.compRetNativeType) && (genTypeSize(comp->info.compRetNativeType) <= genTypeSize(ret)))); // If the actual return type requires normalization, then make sure we // do so by using the correct small type for the GT_LCL_FLD. It would // be conservative to check just compRetNativeType for this since small // structs are normalized to primitive types when they are returned in // registers, so we would normalize for them as well. if (varTypeIsSmall(comp->info.compRetType)) { assert(genTypeSize(comp->info.compRetNativeType) == genTypeSize(comp->info.compRetType)); lclVar->ChangeType(comp->info.compRetType); } else { // Otherwise we don't mind that we leave the upper bits undefined. 
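            // For illustration (a sketch, not an exhaustive description): a struct returned
            // in a single integer register is re-read here as LCL_FLD<int>/LCL_FLD<long>
            // straight from the local's stack home, so bits beyond the logical struct
            // payload are simply whatever happens to be in memory.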
lclVar->ChangeType(ret->TypeGet()); } } else { const var_types lclVarType = varDsc->GetRegisterType(lclVar); assert(lclVarType != TYP_UNDEF); const var_types actualType = genActualType(lclVarType); lclVar->ChangeType(actualType); if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(lclVarType)) { GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), ret->gtOp1); ret->gtOp1 = bitcast; BlockRange().InsertBefore(ret, bitcast); ContainCheckBitCast(bitcast); } } } //---------------------------------------------------------------------------------------------- // LowerCallStruct: Lowers a call node that returns a stuct. // // Arguments: // call - The call node to lower. // // Notes: // - this handles only single-register returns; // - it transforms the call's user for `GT_STOREIND`. // void Lowering::LowerCallStruct(GenTreeCall* call) { assert(varTypeIsStruct(call)); if (call->HasMultiRegRetVal()) { return; } if (GlobalJitOptions::compFeatureHfa) { if (comp->IsHfa(call)) { #if defined(TARGET_ARM64) assert(comp->GetHfaCount(call) == 1); #elif defined(TARGET_ARM) // ARM returns double in 2 float registers, but // `call->HasMultiRegRetVal()` count double registers. assert(comp->GetHfaCount(call) <= 2); #else // !TARGET_ARM64 && !TARGET_ARM NYI("Unknown architecture"); #endif // !TARGET_ARM64 && !TARGET_ARM var_types hfaType = comp->GetHfaType(call); if (call->TypeIs(hfaType)) { return; } } } CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; var_types returnType = comp->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN); var_types origType = call->TypeGet(); call->gtType = genActualType(returnType); LIR::Use callUse; if (BlockRange().TryGetUse(call, &callUse)) { GenTree* user = callUse.User(); switch (user->OperGet()) { case GT_RETURN: case GT_STORE_LCL_VAR: case GT_STORE_BLK: case GT_STORE_OBJ: // Leave as is, the user will handle it. assert(user->TypeIs(origType) || varTypeIsSIMD(user->TypeGet())); break; #ifdef FEATURE_SIMD case GT_STORE_LCL_FLD: // If the call type was ever updated (in importer) to TYP_SIMD*, it should match the user type. // If not, the user type should match the struct's returnType. assert((varTypeIsSIMD(user) && user->TypeIs(origType)) || (returnType == user->TypeGet())); break; #endif // FEATURE_SIMD case GT_STOREIND: #ifdef FEATURE_SIMD if (varTypeIsSIMD(user)) { user->ChangeType(returnType); break; } #endif // FEATURE_SIMD // importer has a separate mechanism to retype calls to helpers, // keep it for now. assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_CORERT_ABI))); assert(call->IsHelperCall()); assert(returnType == user->TypeGet()); break; default: unreached(); } } } //---------------------------------------------------------------------------------------------- // LowerStoreSingleRegCallStruct: Lowers a store block where the source is a struct typed call. // // Arguments: // store - The store node to lower. 
//
// Notes:
//     - the function is only for calls that return one register;
//     - it spills the call's result if it can be retyped as a primitive type;
//
void Lowering::LowerStoreSingleRegCallStruct(GenTreeBlk* store)
{
    assert(store->Data()->IsCall());
    GenTreeCall* call = store->Data()->AsCall();
    assert(!call->HasMultiRegRetVal());

    const ClassLayout* layout  = store->GetLayout();
    const var_types    regType = layout->GetRegisterType();

    if (regType != TYP_UNDEF)
    {
        store->ChangeType(regType);
        store->SetOper(GT_STOREIND);
        LowerStoreIndirCommon(store->AsStoreInd());
        return;
    }
    else
    {
#if defined(WINDOWS_AMD64_ABI)
        // All ABIs except Windows x64 support passing 3-byte structs in registers,
        // and the other 64-bit ABIs also support passing 5, 6 and 7 byte structs.
        unreached();
#else  // !WINDOWS_AMD64_ABI
        if (store->OperIs(GT_STORE_OBJ))
        {
            store->SetOper(GT_STORE_BLK);
        }
        store->gtBlkOpKind = GenTreeObj::BlkOpKindUnroll;

        GenTreeLclVar* spilledCall = SpillStructCallResult(call);
        store->SetData(spilledCall);
        LowerBlockStoreCommon(store);
#endif // WINDOWS_AMD64_ABI
    }
}

#if !defined(WINDOWS_AMD64_ABI)
//----------------------------------------------------------------------------------------------
// SpillStructCallResult: Spill call result to memory.
//
// Arguments:
//     call - a call whose return value is 3, 5, 6 or 7 bytes in size and has to be spilled to memory.
//
// Return Value:
//    load of the spilled variable.
//
GenTreeLclVar* Lowering::SpillStructCallResult(GenTreeCall* call) const
{
    // TODO-1stClassStructs: we can support this in codegen for `GT_STORE_BLK` without new temps.
    const unsigned spillNum = comp->lvaGrabTemp(true DEBUGARG("Return value temp for an odd struct return size"));
    comp->lvaSetVarDoNotEnregister(spillNum DEBUGARG(DoNotEnregisterReason::LocalField));
    CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
    comp->lvaSetStruct(spillNum, retClsHnd, false);
    GenTreeLclFld* spill = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, call->gtType, spillNum, 0);
    spill->gtOp1         = call;
    spill->gtFlags |= GTF_VAR_DEF;

    BlockRange().InsertAfter(call, spill);
    ContainCheckStoreLoc(spill);
    GenTreeLclVar* loadCallResult = comp->gtNewLclvNode(spillNum, TYP_STRUCT)->AsLclVar();
    BlockRange().InsertAfter(spill, loadCallResult);
    return loadCallResult;
}
#endif // !WINDOWS_AMD64_ABI

GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
{
    noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);

    // Non-virtual direct/indirect calls: Work out if the address of the
    // call is known at JIT time. If not it is either an indirect call
    // or the address must be accessed via a single/double indirection.
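    // As a rough sketch of the cases handled below (not authoritative for every target):
    //
    //     IAT_VALUE:     call addr              // direct
    //     IAT_PVALUE:    call [addr]            // one indirection
    //     IAT_PPVALUE:   call [[addr]]          // two indirections (asserts in debug below)
    //     IAT_RELPVALUE: call ([addr] + addr)   // relative indirection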
void* addr; InfoAccessType accessType; CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd); #ifdef FEATURE_READYTORUN if (call->gtEntryPoint.addr != nullptr) { accessType = call->gtEntryPoint.accessType; addr = call->gtEntryPoint.addr; } else #endif if (call->gtCallType == CT_HELPER) { noway_assert(helperNum != CORINFO_HELP_UNDEF); // the convention on getHelperFtn seems to be (it's not documented) // that it returns an address or if it returns null, pAddr is set to // another address, which requires an indirection void* pAddr; addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr); if (addr != nullptr) { assert(pAddr == nullptr); accessType = IAT_VALUE; } else { accessType = IAT_PVALUE; addr = pAddr; } } else { noway_assert(helperNum == CORINFO_HELP_UNDEF); CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY; if (call->IsSameThis()) { aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS); } if (!call->NeedsNullCheck()) { aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL); } CORINFO_CONST_LOOKUP addrInfo; comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags); accessType = addrInfo.accessType; addr = addrInfo.addr; } GenTree* result = nullptr; switch (accessType) { case IAT_VALUE: // Non-virtual direct call to known address. // For JIT helper based tailcall (only used on x86) the target // address is passed as an arg to the helper so we want a node for // it. if (!IsCallTargetInRange(addr) || call->IsTailCallViaJitHelper()) { result = AddrGen(addr); } else { // a direct call within range of hardware relative call instruction // stash the address for codegen call->gtDirectCallAddress = addr; } break; case IAT_PVALUE: { // If we are using an indirection cell for a direct call then apply // an optimization that loads the call target directly from the // indirection cell, instead of duplicating the tree. bool hasIndirectionCell = call->GetIndirectionCellArgKind() != NonStandardArgKind::None; if (!hasIndirectionCell) { // Non-virtual direct calls to addresses accessed by // a single indirection. GenTree* cellAddr = AddrGen(addr); #ifdef DEBUG cellAddr->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif GenTree* indir = Ind(cellAddr); result = indir; } break; } case IAT_PPVALUE: // Non-virtual direct calls to addresses accessed by // a double indirection. // // Expanding an IAT_PPVALUE here, will lose the opportunity // to Hoist/CSE the first indirection as it is an invariant load // assert(!"IAT_PPVALUE case in LowerDirectCall"); noway_assert(helperNum == CORINFO_HELP_UNDEF); result = AddrGen(addr); // Double-indirection. Load the address into a register // and call indirectly through the register // result = Ind(Ind(result)); break; case IAT_RELPVALUE: { // Non-virtual direct calls to addresses accessed by // a single relative indirection. 
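            // i.e. (a sketch): target = [addr] + addr, matching the IND + ADD built below.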
GenTree* cellAddr = AddrGen(addr); GenTree* indir = Ind(cellAddr); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr)); break; } default: noway_assert(!"Bad accessType"); break; } return result; } GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call) { noway_assert(call->gtCallType == CT_USER_FUNC); assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) & (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)); GenTree* thisArgNode; if (call->IsTailCallViaJitHelper()) { const unsigned argNum = 0; fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum); thisArgNode = thisArgTabEntry->GetNode(); } else { thisArgNode = comp->gtGetThisArg(call); } assert(thisArgNode != nullptr); assert(thisArgNode->gtOper == GT_PUTARG_REG); GenTree* thisExpr = thisArgNode->AsOp()->gtOp1; // We're going to use the 'this' expression multiple times, so make a local to copy it. GenTree* base; if (thisExpr->OperIs(GT_LCL_VAR)) { base = comp->gtNewLclvNode(thisExpr->AsLclVar()->GetLclNum(), thisExpr->TypeGet()); } else if (thisExpr->OperIs(GT_LCL_FLD)) { base = comp->gtNewLclFldNode(thisExpr->AsLclFld()->GetLclNum(), thisExpr->TypeGet(), thisExpr->AsLclFld()->GetLclOffs()); } else { unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call")); base = comp->gtNewLclvNode(delegateInvokeTmp, thisExpr->TypeGet()); LIR::Use thisExprUse(BlockRange(), &thisArgNode->AsOp()->gtOp1, thisArgNode); ReplaceWithLclVar(thisExprUse, delegateInvokeTmp); thisExpr = thisExprUse.Def(); // it's changed; reload it. } // replace original expression feeding into thisPtr with // [originalThis + offsetOfDelegateInstance] GenTree* newThisAddr = new (comp, GT_LEA) GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance); GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr); BlockRange().InsertAfter(thisExpr, newThisAddr, newThis); thisArgNode->AsOp()->gtOp1 = newThis; ContainCheckIndir(newThis->AsIndir()); // the control target is // [originalThis + firstTgtOffs] unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget; GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs); GenTree* callTarget = Ind(result); // don't need to sequence and insert this tree, caller will do it return callTarget; } GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call) { #ifdef TARGET_X86 if (call->gtCallCookie != nullptr) { NYI_X86("Morphing indirect non-virtual call with non-standard args"); } #endif // Indirect cookie calls gets transformed by fgMorphArgs as indirect call with non-standard args. // Hence we should never see this type of call in lower. noway_assert(call->gtCallCookie == nullptr); return nullptr; } //------------------------------------------------------------------------ // CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke // epilogs to invoke a GC under a condition. The return trap checks some global // location (the runtime tells us where that is and how many indirections to make), // then, based on the result, conditionally calls a GC helper. We use a special node // for this because at this time (late in the compilation phases), introducing flow // is tedious/difficult. // // This is used for PInvoke inlining. // // Return Value: // Code tree to perform the action. 
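//    For illustration, the resulting tree is roughly (shape only):
//
//        GT_RETURNTRAP(GT_IND(addrOfTrapFlag))
//
//    where addrOfTrapFlag may itself come from an extra indirection when the runtime
//    hands back a pointer to the flag's address rather than the address itself.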
// GenTree* Lowering::CreateReturnTrapSeq() { // The GT_RETURNTRAP node expands to this: // if (g_TrapReturningThreads) // { // RareDisablePreemptiveGC(); // } // The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'. void* pAddrOfCaptureThreadGlobal = nullptr; int32_t* addrOfCaptureThreadGlobal = comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal); GenTree* testTree; if (addrOfCaptureThreadGlobal != nullptr) { testTree = AddrGen(addrOfCaptureThreadGlobal); } else { testTree = Ind(AddrGen(pAddrOfCaptureThreadGlobal)); } return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, Ind(testTree, TYP_INT)); } //------------------------------------------------------------------------ // SetGCState: Create a tree that stores the given constant (0 or 1) into the // thread's GC state field. // // This is used for PInvoke inlining. // // Arguments: // state - constant (0 or 1) to store into the thread's GC state field. // // Return Value: // Code tree to perform the action. // GenTree* Lowering::SetGCState(int state) { // Thread.offsetOfGcState = 0/1 assert(state == 0 || state == 1); const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot); GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state); GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState); GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode); return storeGcState; } //------------------------------------------------------------------------ // CreateFrameLinkUpdate: Create a tree that either links or unlinks the // locally-allocated InlinedCallFrame from the Frame list. // // This is used for PInvoke inlining. // // Arguments: // action - whether to link (push) or unlink (pop) the Frame // // Return Value: // Code tree to perform the action. // GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action) { const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo; GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot); // Thread->m_pFrame GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame); GenTree* data = nullptr; if (action == PushFrame) { // Thread->m_pFrame = &inlinedCallFrame; data = new (comp, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr); } else { assert(action == PopFrame); // Thread->m_pFrame = inlinedCallFrame.m_pNext; data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, pInfo->inlinedCallFrameInfo.offsetOfFrameLink); } GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data); return storeInd; } //------------------------------------------------------------------------ // InsertPInvokeMethodProlog: Create the code that runs at the start of // every method that has PInvoke calls. // // Initialize the TCB local and the InlinedCallFrame object. Then link ("push") // the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame // is defined in vm/frames.h. See also vm/jitinterface.cpp for more information. // The offsets of these fields is returned by the VM in a call to ICorStaticInfo::getEEInfo(). 
// // The (current) layout is as follows: // // 64-bit 32-bit CORINFO_EE_INFO // offset offset field name offset when set // ----------------------------------------------------------------------------------------- // +00h +00h GS cookie offsetOfGSCookie // +08h +04h vptr for class InlinedCallFrame offsetOfFrameVptr method prolog // +10h +08h m_Next offsetOfFrameLink method prolog // +18h +0Ch m_Datum offsetOfCallTarget call site // +20h n/a m_StubSecretArg not set by JIT // +28h +10h m_pCallSiteSP offsetOfCallSiteSP x86: call site, and zeroed in method // prolog; // non-x86: method prolog (SP remains // constant in function, after prolog: no // localloc and PInvoke in same function) // +30h +14h m_pCallerReturnAddress offsetOfReturnAddress call site // +38h +18h m_pCalleeSavedFP offsetOfCalleeSavedFP not set by JIT // +1Ch m_pThread // +20h m_pSPAfterProlog offsetOfSPAfterProlog arm only // +20/24h JIT retval spill area (int) before call_gc ??? // +24/28h JIT retval spill area (long) before call_gc ??? // +28/2Ch Saved value of EBP method prolog ??? // // Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points // to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before* // the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location, // and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie. // // Return Value: // none // void Lowering::InsertPInvokeMethodProlog() { noway_assert(comp->info.compUnmanagedCallCountWithGCTransition); noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); if (comp->opts.ShouldUsePInvokeHelpers()) { return; } JITDUMP("======= Inserting PInvoke method prolog\n"); // The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog. assert(comp->fgFirstBBisScratch()); LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB); const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo(); const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo; // First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr #if defined(DEBUG) const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar); assert(inlinedPInvokeDsc->IsAddressExposed()); #endif // DEBUG GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR) GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr); // Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list: // TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg); // for x86, don't pass the secretArg. 
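    // The IR inserted below is roughly the following (a sketch; the exact shape varies by target):
    //
    //     t0 = CALL CORINFO_HELP_INIT_PINVOKE_FRAME(&inlinedCallFrame [, secretArg])
    //     STORE_LCL_VAR<frameListRoot>(t0)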
CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr); #else GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr, PhysReg(REG_SECRET_STUB_PARAM)); #endif GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList); // some sanity checks on the frame list root vardsc const unsigned lclNum = comp->info.compLvFrameListRoot; const LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); noway_assert(!varDsc->lvIsParam); noway_assert(varDsc->lvType == TYP_I_IMPL); GenTree* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, lclNum); store->AsOp()->gtOp1 = call; store->gtFlags |= GTF_VAR_DEF; GenTree* const insertionPoint = firstBlockRange.FirstNonCatchArgNode(); comp->fgMorphTree(store); firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store)); DISPTREERANGE(firstBlockRange, store); #if !defined(TARGET_X86) && !defined(TARGET_ARM) // For x86, this step is done at the call site (due to stack pointer not being static in the function). // For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME. // -------------------------------------------------------- // InlinedCallFrame.m_pCallSiteSP = @RSP; GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP); storeSP->gtOp1 = PhysReg(REG_SPBASE); storeSP->gtFlags |= GTF_VAR_DEF; assert(inlinedPInvokeDsc->lvDoNotEnregister); firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP)); DISPTREERANGE(firstBlockRange, storeSP); #endif // !defined(TARGET_X86) && !defined(TARGET_ARM) #if !defined(TARGET_ARM) // For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME. // -------------------------------------------------------- // InlinedCallFrame.m_pCalleeSavedEBP = @RBP; GenTreeLclFld* storeFP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCalleeSavedFP); assert(inlinedPInvokeDsc->lvDoNotEnregister); storeFP->gtOp1 = PhysReg(REG_FPBASE); storeFP->gtFlags |= GTF_VAR_DEF; firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP)); DISPTREERANGE(firstBlockRange, storeFP); #endif // !defined(TARGET_ARM) // -------------------------------------------------------- // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Push a frame - if we are NOT in an IL stub, this is done right before the call // The init routine sets InlinedCallFrame's m_pNext, so we just set the thead's top-of-stack GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame); firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); DISPTREERANGE(firstBlockRange, frameUpd); } #endif // TARGET_64BIT } //------------------------------------------------------------------------ // InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method // that has PInvoke inlines. This needs to be inserted any place you can exit the // function: returns, tailcalls and jmps. 
// // Arguments: // returnBB - basic block from which a method can return // lastExpr - GenTree of the last top level stmnt of returnBB (debug only arg) // // Return Value: // Code tree to perform the action. // void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)) { assert(returnBB != nullptr); assert(comp->info.compUnmanagedCallCountWithGCTransition); if (comp->opts.ShouldUsePInvokeHelpers()) { return; } JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); GenTree* insertionPoint = returnBlockRange.LastNode(); assert(insertionPoint == lastExpr); // Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution // order so that it is guaranteed that there will be no further PInvokes after that point in the method. // // Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN. After inserting PME, execution order would be // Op1, PME, GT_RETURN // // Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be // arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL // After inserting PME execution order would be: // arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL // // Example3: GT_JMP. After inserting PME execution order would be: PME, GT_JMP // That is after PME, args for GT_JMP call will be setup. // Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) #endif // TARGET_64BIT { GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame); returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); } } //------------------------------------------------------------------------ // InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code. // It does all the necessary call-site setup of the InlinedCallFrame. // // Arguments: // call - the call for which we are inserting the PInvoke prolog. // // Return Value: // None. // void Lowering::InsertPInvokeCallProlog(GenTreeCall* call) { JITDUMP("======= Inserting PInvoke call prolog\n"); GenTree* insertBefore = call; if (call->gtCallType == CT_INDIRECT) { bool isClosed; insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode(); assert(isClosed); } const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo; gtCallTypes callType = (gtCallTypes)call->gtCallType; noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); if (comp->opts.ShouldUsePInvokeHelpers()) { // First argument is the address of the frame variable. GenTree* frameAddr = new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar); #if defined(TARGET_X86) && !defined(UNIX_X86_ABI) // On x86 targets, PInvoke calls need the size of the stack args in InlinedCallFrame.m_Datum. 
// This is because the callee pops stack arguments, and we need to keep track of this during stack // walking const unsigned numStkArgBytes = call->fgArgInfo->GetNextSlotByteOffset(); GenTree* stackBytes = comp->gtNewIconNode(numStkArgBytes, TYP_INT); GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr, stackBytes); #else GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr); #endif // Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, args); comp->fgMorphTree(helperCall); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall)); LowerNode(helperCall); // helper call is inserted before current node and should be lowered here. return; } // Emit the following sequence: // // InlinedCallFrame.callTarget = methodHandle // stored in m_Datum // InlinedCallFrame.m_pCallSiteSP = SP // x86 only // InlinedCallFrame.m_pCallerReturnAddress = return address // GT_START_PREEEMPTC // Thread.gcState = 0 // (non-stub) - update top Frame on TCB // 64-bit targets only // ---------------------------------------------------------------------------------- // Setup InlinedCallFrame.callSiteTarget (which is how the JIT refers to it). // The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings. GenTree* src = nullptr; if (callType == CT_INDIRECT) { #if !defined(TARGET_64BIT) // On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum. const unsigned stackByteOffset = call->fgArgInfo->GetNextSlotByteOffset(); src = comp->gtNewIconNode(stackByteOffset, TYP_INT); #else // On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum. // If the stub parameter value is not needed, m_Datum will be initialized by the VM. if (comp->info.compPublishStubParam) { src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL); } #endif // !defined(TARGET_64BIT) } else { assert(callType == CT_USER_FUNC); void* pEmbedMethodHandle = nullptr; CORINFO_METHOD_HANDLE embedMethodHandle = comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle); noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle)); if (embedMethodHandle != nullptr) { // InlinedCallFrame.callSiteTarget = methodHandle src = AddrGen(embedMethodHandle); } else { // InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle src = Ind(AddrGen(pEmbedMethodHandle)); } } if (src != nullptr) { // Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget. 
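        // i.e. roughly (a sketch of the node built below, not a separate mechanism):
        //
        //     STORE_LCL_FLD<InlinedCallFrame + offsetOfCallTarget>(src)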
GenTreeLclFld* store = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallTarget); store->gtOp1 = src; store->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, store); } #ifdef TARGET_X86 // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallSiteSP = SP GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP); storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE); storeCallSiteSP->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP); #endif // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call) GenTreeLclFld* storeLab = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfReturnAddress); storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); storeLab->gtFlags |= GTF_VAR_DEF; InsertTreeBeforeAndContainCheck(insertBefore, storeLab); // Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method // contains PInvokes; on 64-bit targets this is necessary in non-stubs. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { // Set the TCB's frame to be the one we just created. // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME) // has prepended it to the linked list to maintain the stack of Frames. // // Stubs do this once per stub, not once per call. GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd)); ContainCheckStoreIndir(frameUpd->AsStoreInd()); } #endif // TARGET_64BIT // IMPORTANT **** This instruction must be the last real instruction **** // It changes the thread's state to Preemptive mode // ---------------------------------------------------------------------------------- // [tcb + offsetOfGcState] = 0 GenTree* storeGCState = SetGCState(0); BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState)); ContainCheckStoreIndir(storeGCState->AsStoreInd()); // Indicate that codegen has switched this thread to preemptive GC. // This tree node doesn't generate any code, but impacts LSRA and gc reporting. // This tree node is simple so doesn't require sequencing. GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID); BlockRange().InsertBefore(insertBefore, preemptiveGCNode); } //------------------------------------------------------------------------ // InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call. // // Arguments: // call - the call for which we are inserting the PInvoke epilog. // // Return Value: // None. // void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call) { JITDUMP("======= Inserting PInvoke call epilog\n"); if (comp->opts.ShouldUsePInvokeHelpers()) { noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM); // First argument is the address of the frame variable. 
GenTree* frameAddr = comp->gtNewLclVarAddrNode(comp->lvaInlinedPInvokeFrameVar, TYP_BYREF); #if defined(DEBUG) const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar); assert(inlinedPInvokeDsc->IsAddressExposed()); #endif // DEBUG // Insert call to CORINFO_HELP_JIT_PINVOKE_END GenTreeCall* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewCallArgs(frameAddr)); comp->fgMorphTree(helperCall); BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall)); ContainCheckCallOperands(helperCall); return; } // gcstate = 1 GenTree* insertionPoint = call->gtNext; GenTree* tree = SetGCState(1); BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree)); ContainCheckStoreIndir(tree->AsStoreInd()); tree = CreateReturnTrapSeq(); BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree)); ContainCheckReturnTrap(tree->AsOp()); // Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets thi // happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB)) { tree = CreateFrameLinkUpdate(PopFrame); BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree)); ContainCheckStoreIndir(tree->AsStoreInd()); } #else const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo; // ---------------------------------------------------------------------------------- // InlinedCallFrame.m_pCallerReturnAddress = nullptr GenTreeLclFld* const storeCallSiteTracker = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfReturnAddress); GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0); storeCallSiteTracker->gtOp1 = constantZero; storeCallSiteTracker->gtFlags |= GTF_VAR_DEF; BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker); ContainCheckStoreLoc(storeCallSiteTracker); #endif // TARGET_64BIT } //------------------------------------------------------------------------ // LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call // // Arguments: // call - The call to lower. // // Return Value: // The lowered call tree. // GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call) { // PInvoke lowering varies depending on the flags passed in by the EE. By default, // GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified, // GC transitions are instead performed using helper calls. Examples of each case are given // below. Note that the data structure that is used to store information about a call frame // containing any P/Invoke calls is initialized in the method prolog (see // InsertPInvokeMethod{Prolog,Epilog} for details). // // Inline transitions: // InlinedCallFrame inlinedCallFrame; // // ... 
    //
    //    // Set up frame information
    //    inlinedCallFrame.callTarget = methodHandle;      // stored in m_Datum
    //    inlinedCallFrame.m_pCallSiteSP = SP;             // x86 only
    //    inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the
    //    call)
    //    Thread.m_pFrame = &inlinedCallFrame;  (non-IL-stub only)
    //
    //    // Switch the thread's GC mode to preemptive mode
    //    thread->m_fPreemptiveGCDisabled = 0;
    //
    //    // Call the unmanaged method
    //    target();
    //
    //    // Switch the thread's GC mode back to cooperative mode
    //    thread->m_fPreemptiveGCDisabled = 1;
    //
    //    // Rendezvous with a running collection if necessary
    //    if (g_TrapReturningThreads)
    //        RareDisablePreemptiveGC();
    //
    // Transitions using helpers:
    //
    //    OpaqueFrame opaqueFrame;
    //
    //    ...
    //
    //    // Call the JIT_PINVOKE_BEGIN helper
    //    JIT_PINVOKE_BEGIN(&opaqueFrame);
    //
    //    // Call the unmanaged method
    //    target();
    //
    //    // Call the JIT_PINVOKE_END helper
    //    JIT_PINVOKE_END(&opaqueFrame);
    //
    // Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target
    // platform. They may be changed in the future such that they preserve all register values.

    GenTree* result = nullptr;

    // All code generated by this function must not contain the randomly-inserted NOPs
    // that we insert to inhibit JIT spraying in partial trust scenarios.
    // The PINVOKE_PROLOG op signals this to the code generator/emitter.

    GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID);
    BlockRange().InsertBefore(call, prolog);

    bool addPInvokePrologEpilog = !call->IsSuppressGCTransition();
    if (addPInvokePrologEpilog)
    {
        InsertPInvokeCallProlog(call);
    }

    if (call->gtCallType != CT_INDIRECT)
    {
        noway_assert(call->gtCallType == CT_USER_FUNC);
        CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;

        CORINFO_CONST_LOOKUP lookup;
        comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);

        void*    addr = lookup.addr;
        GenTree* addrTree;
        switch (lookup.accessType)
        {
            case IAT_VALUE:
                // IsCallTargetInRange always returns true on x64. It wants to use rip-based addressing
                // for this call. Unfortunately, in case of pinvokes (+suppressgctransition) to external libs
                // (e.g. kernel32.dll) the relative offset is unlikely to fit into int32 and we will have to
                // turn fAllowRel32 off globally.
                if ((call->IsSuppressGCTransition() && !comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) ||
                    !IsCallTargetInRange(addr))
                {
                    result = AddrGen(addr);
                }
                else
                {
                    // a direct call within range of hardware relative call instruction
                    // stash the address for codegen
                    call->gtDirectCallAddress = addr;
#ifdef FEATURE_READYTORUN
                    call->gtEntryPoint.addr       = nullptr;
                    call->gtEntryPoint.accessType = IAT_VALUE;
#endif
                }
                break;

            case IAT_PVALUE:
                addrTree = AddrGen(addr);
#ifdef DEBUG
                addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
                result = Ind(addrTree);
                break;

            case IAT_PPVALUE:
                // ToDo: Expanding an IAT_PPVALUE here loses the opportunity
                // to Hoist/CSE the first indirection as it is an invariant load.
                //
                // This case currently occurs today when we make PInvoke calls in crossgen
                //
                // assert(!"IAT_PPVALUE in Lowering::LowerNonvirtPinvokeCall");

                addrTree = AddrGen(addr);
#ifdef DEBUG
                addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
                // Double-indirection. Load the address into a register
                // and call indirectly through the register
                result = Ind(Ind(addrTree));
                break;

            case IAT_RELPVALUE:
                unreached();
        }
    }

    if (addPInvokePrologEpilog)
    {
        InsertPInvokeCallEpilog(call);
    }

    return result;
}

// Expand the code necessary to calculate the control target.
// Returns: the expression needed to calculate the control target
// May insert embedded statements
GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
{
    noway_assert(call->gtCallType == CT_USER_FUNC);

    regNumber thisPtrArgReg = comp->codeGen->genGetThisArgReg(call);

    // get a reference to the thisPtr being passed
    fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, 0);
    assert(argEntry->GetRegNum() == thisPtrArgReg);
    assert(argEntry->GetNode()->OperIs(GT_PUTARG_REG));
    GenTree* thisPtr = argEntry->GetNode()->AsUnOp()->gtGetOp1();

    // If what we are passing as the thisptr is not already a local, make a new local to place it in
    // because we will be creating expressions based on it.
    unsigned lclNum;
    if (thisPtr->OperIsLocal())
    {
        lclNum = thisPtr->AsLclVarCommon()->GetLclNum();
    }
    else
    {
        // Split off the thisPtr and store to a temporary variable.
        if (vtableCallTemp == BAD_VAR_NUM)
        {
            vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call"));
        }

        LIR::Use thisPtrUse(BlockRange(), &(argEntry->GetNode()->AsUnOp()->gtOp1), argEntry->GetNode());
        ReplaceWithLclVar(thisPtrUse, vtableCallTemp);

        lclNum = vtableCallTemp;
    }

    // Get hold of the vtable offset (note: this might be expensive)
    unsigned vtabOffsOfIndirection;
    unsigned vtabOffsAfterIndirection;
    bool     isRelative;
    comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
                                                  &vtabOffsAfterIndirection, &isRelative);

    // If the thisPtr is a local field, then construct a local field type node
    GenTree* local;
    if (thisPtr->isLclField())
    {
        local = new (comp, GT_LCL_FLD)
            GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->GetLclOffs());
    }
    else
    {
        local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum);
    }

    // pointer to virtual table = [REG_CALL_THIS + offs]
    GenTree* result = Ind(Offset(local, VPTR_OFFS));

    // Get the appropriate vtable chunk
    if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
    {
        if (isRelative)
        {
            // MethodTable offset is a relative pointer.
            //
            // Additional temporary variable is used to store virtual table pointer.
// Address of method is obtained by the next computations: // // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of // vtable-1st-level-indirection): // tmp = vtab // // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // If relative pointers are also in second level indirection, additional temporary is used: // tmp1 = vtab // tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection] // result = tmp2 + [tmp2] // unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp")); unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2")); GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result); GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet()); tmpTree = Offset(tmpTree, vtabOffsOfIndirection); tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false); GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs); GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1); GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base); LIR::Range range = LIR::SeqTree(comp, lclvNodeStore); JITDUMP("result of obtaining pointer to virtual table:\n"); DISPRANGE(range); BlockRange().InsertBefore(call, std::move(range)); LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2); ContainCheckIndir(tmpTree->AsIndir()); JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n"); DISPRANGE(range2); BlockRange().InsertAfter(lclvNodeStore, std::move(range2)); result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet())); result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet())); } else { // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection] result = Ind(Offset(result, vtabOffsOfIndirection)); } } else { assert(!isRelative); } // Load the function address // result = [reg+vtabOffs] if (!isRelative) { result = Ind(Offset(result, vtabOffsAfterIndirection)); } return result; } // Lower stub dispatched virtual calls. GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call) { assert(call->IsVirtualStub()); // An x86 JIT which uses full stub dispatch must generate only // the following stub dispatch calls: // // (1) isCallRelativeIndirect: // call dword ptr [rel32] ; FF 15 ---rel32---- // (2) isCallRelative: // call abc ; E8 ---rel32---- // (3) isCallRegisterIndirect: // 3-byte nop ; // call dword ptr [eax] ; FF 10 // // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect. GenTree* result = nullptr; // This is code to set up an indirect call to a stub address computed // via dictionary lookup. if (call->gtCallType == CT_INDIRECT) { // The importer decided we needed a stub call via a computed // stub dispatch address, i.e. an address which came from a dictionary lookup. // - The dictionary lookup produces an indirected address, suitable for call // via "call [VirtualStubParam.reg]" // // This combination will only be generated for shared generic code and when // stub dispatch is active. // fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg. // All we have to do here is add an indirection to generate the actual call target. 
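        // i.e. (a sketch): gtCallAddr -> IND(gtCallAddr), with GTF_IND_REQ_ADDR_IN_REG set
        // below so that the stub address remains in the VSD register for the call.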
GenTree* ind = Ind(call->gtCallAddr); BlockRange().InsertAfter(call->gtCallAddr, ind); call->gtCallAddr = ind; ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG; ContainCheckIndir(ind->AsIndir()); } else { // Direct stub call. // Get stub addr. This will return NULL if virtual call stubs are not active void* stubAddr = call->gtStubCallStubAddr; noway_assert(stubAddr != nullptr); // If not CT_INDIRECT, then it should always be relative indir call. // This is ensured by VM. noway_assert(call->IsVirtualStubRelativeIndir()); // Direct stub calls, though the stubAddr itself may still need to be // accessed via an indirection. GenTree* addr = AddrGen(stubAddr); // On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as // the target address, and we set a flag that it's a VSD call. The helper then // handles any necessary indirection. if (call->IsTailCallViaJitHelper()) { result = addr; } else { bool shouldOptimizeVirtualStubCall = false; #if defined(TARGET_ARMARCH) || defined(TARGET_AMD64) // Skip inserting the indirection node to load the address that is already // computed in the VSD stub arg register as a hidden parameter. Instead during the // codegen, just load the call target from there. shouldOptimizeVirtualStubCall = !comp->opts.IsCFGEnabled(); #endif if (!shouldOptimizeVirtualStubCall) { result = Ind(addr); } } } // TODO-Cleanup: start emitting random NOPS return result; } //------------------------------------------------------------------------ // Lowering::AreSourcesPossibleModifiedLocals: // Given two nodes which will be used in an addressing mode (base, // index), check to see if they are lclVar reads, and if so, walk // backwards from the use until both reads have been visited to // determine if they are potentially modified in that range. // // Arguments: // addr - the node that uses the base and index nodes // base - the base node // index - the index node // // Returns: true if either the base or index may be modified between the // node and addr. // bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index) { assert(addr != nullptr); SideEffectSet baseSideEffects; if (base != nullptr) { if (base->OperIsLocalRead()) { baseSideEffects.AddNode(comp, base); } else { base = nullptr; } } SideEffectSet indexSideEffects; if (index != nullptr) { if (index->OperIsLocalRead()) { indexSideEffects.AddNode(comp, index); } else { index = nullptr; } } for (GenTree* cursor = addr;; cursor = cursor->gtPrev) { assert(cursor != nullptr); if (cursor == base) { base = nullptr; } if (cursor == index) { index = nullptr; } if ((base == nullptr) && (index == nullptr)) { return false; } m_scratchSideEffects.Clear(); m_scratchSideEffects.AddNode(comp, cursor); if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false)) { return true; } if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false)) { return true; } } } //------------------------------------------------------------------------ // TryCreateAddrMode: recognize trees which can be implemented using an // addressing mode and transform them to a GT_LEA // // Arguments: // addr - the use of the address we want to transform // isContainable - true if this addressing mode can be contained // parent - the node that consumes the given addr (most likely it's an IND) // // Returns: // true if the address node was changed to a LEA, false otherwise. 
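// For example (an illustrative sketch only):
//
//     ADD(lclBase, ADD(LSH(lclIndex, 3), 16))
//
// can be folded into
//
//     LEA(base=lclBase, index=lclIndex, scale=8, offset=16)
//
// subject to the target-specific restrictions checked below.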
// bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent) { if (!addr->OperIs(GT_ADD) || addr->gtOverflow()) { return false; } #ifdef TARGET_ARM64 if (parent->OperIsIndir() && parent->AsIndir()->IsVolatile() && !varTypeIsGC(addr)) { // For Arm64 we avoid using LEA for volatile INDs // because we won't be able to use ldar/star return false; } #endif GenTree* base = nullptr; GenTree* index = nullptr; unsigned scale = 0; ssize_t offset = 0; bool rev = false; // Find out if an addressing mode can be constructed bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address true, // fold &rev, // reverse ops &base, // base addr &index, // index val &scale, // scaling &offset); // displacement var_types targetType = parent->OperIsIndir() ? parent->TypeGet() : TYP_UNDEF; #ifdef TARGET_ARMARCH // Multiplier should be a "natural-scale" power of two number which is equal to target's width. // // *(ulong*)(data + index * 8); - can be optimized // *(ulong*)(data + index * 7); - can not be optimized // *(int*)(data + index * 2); - can not be optimized // if ((scale > 0) && (genTypeSize(targetType) != scale)) { return false; } #endif if (scale == 0) { scale = 1; } if (!isContainable) { // this is just a reg-const add if (index == nullptr) { return false; } // this is just a reg-reg add if ((scale == 1) && (offset == 0)) { return false; } } // make sure there are not any side effects between def of leaves and use if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index)) { JITDUMP("No addressing mode:\n "); DISPNODE(addr); return false; } JITDUMP("Addressing mode:\n"); JITDUMP(" Base\n "); DISPNODE(base); if (index != nullptr) { JITDUMP(" + Index * %u + %d\n ", scale, offset); DISPNODE(index); } else { JITDUMP(" + %d\n", offset); } // Save the (potentially) unused operands before changing the address to LEA. ArrayStack<GenTree*> unusedStack(comp->getAllocator(CMK_ArrayStack)); unusedStack.Push(addr->AsOp()->gtGetOp1()); unusedStack.Push(addr->AsOp()->gtGetOp2()); addr->ChangeOper(GT_LEA); // Make sure there are no leftover side effects (though the existing ADD we're // changing shouldn't have any at this point, but sometimes it does). addr->gtFlags &= ~GTF_ALL_EFFECT; GenTreeAddrMode* addrMode = addr->AsAddrMode(); addrMode->SetBase(base); addrMode->SetIndex(index); addrMode->SetScale(scale); addrMode->SetOffset(static_cast<int>(offset)); // Neither the base nor the index should now be contained. if (base != nullptr) { base->ClearContained(); } if (index != nullptr) { index->ClearContained(); } // Remove all the nodes that are no longer used. while (!unusedStack.Empty()) { GenTree* unused = unusedStack.Pop(); // Use a loop to process some of the nodes iteratively // instead of pushing them on the stack. while ((unused != base) && (unused != index)) { JITDUMP("Removing unused node:\n "); DISPNODE(unused); BlockRange().Remove(unused); if (unused->OperIs(GT_ADD, GT_MUL, GT_LSH)) { // Push the first operand and loop back to process the second one. // This minimizes the stack depth because the second one tends to be // a constant so it gets processed and then the first one gets popped. unusedStack.Push(unused->AsOp()->gtGetOp1()); unused = unused->AsOp()->gtGetOp2(); } else { assert(unused->OperIs(GT_CNS_INT)); break; } } } #ifdef TARGET_ARM64 // Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store. 
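    // e.g. (a sketch): index = BFIZ(CAST<long>(i32), 3) with an 8-byte-wide access can be
    // folded into the addressing mode [base + SXTW(i32) * 8] (or UXTW for unsigned casts).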
if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) && index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType))) { // BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT GenTreeCast* cast = index->gtGetOp1()->AsCast(); assert(cast->isContained()); const unsigned shiftBy = (unsigned)index->gtGetOp2()->AsIntCon()->IconValue(); // 'scale' and 'offset' have to be unset since we're going to use [base + index * SXTW/UXTW scale] form // where there is no room for additional offsets/scales on ARM64. 'shiftBy' has to match target's width. if (cast->CastOp()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && (genTypeSize(targetType) == (1U << shiftBy)) && (scale == 1) && (offset == 0)) { // TODO: Make sure that genCreateAddrMode marks such BFIZ candidates as GTF_DONT_CSE for better CQ. MakeSrcContained(addrMode, index); } } #endif JITDUMP("New addressing mode node:\n "); DISPNODE(addrMode); JITDUMP("\n"); return true; } //------------------------------------------------------------------------ // LowerAdd: turn this add into a GT_LEA if that would be profitable // // Arguments: // node - the node we care about // // Returns: // nullptr if no transformation was done, or the next node in the transformed node sequence that // needs to be lowered. // GenTree* Lowering::LowerAdd(GenTreeOp* node) { if (varTypeIsIntegralOrI(node->TypeGet())) { GenTree* op1 = node->gtGetOp1(); GenTree* op2 = node->gtGetOp2(); LIR::Use use; // It is not the best place to do such simple arithmetic optimizations, // but it allows us to avoid `LEA(addr, 0)` nodes and doing that in morph // requires more changes. Delete that part if we get an expression optimizer. if (op2->IsIntegralConst(0)) { JITDUMP("Lower: optimize val + 0: "); DISPNODE(node); JITDUMP("Replaced with: "); DISPNODE(op1); if (BlockRange().TryGetUse(node, &use)) { use.ReplaceWith(op1); } else { op1->SetUnusedValue(); } GenTree* next = node->gtNext; BlockRange().Remove(op2); BlockRange().Remove(node); JITDUMP("Remove [%06u], [%06u]\n", op2->gtTreeID, node->gtTreeID); return next; } #ifndef TARGET_ARMARCH if (BlockRange().TryGetUse(node, &use)) { // If this is a child of an indir, let the parent handle it. // If there is a chain of adds, only look at the topmost one. GenTree* parent = use.User(); if (!parent->OperIsIndir() && !parent->OperIs(GT_ADD)) { TryCreateAddrMode(node, false, parent); } } #endif // !TARGET_ARMARCH } if (node->OperIs(GT_ADD)) { ContainCheckBinary(node); } return nullptr; } //------------------------------------------------------------------------ // LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node. // // Arguments: // divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered // // Return Value: // Returns a boolean indicating whether the node was transformed. 
// // Notes: // - Transform UDIV/UMOD by power of 2 into RSZ/AND // - Transform UDIV by constant >= 2^(N-1) into GE // - Transform UDIV/UMOD by constant >= 3 into "magic division" // bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod) { assert(divMod->OperIs(GT_UDIV, GT_UMOD)); #if defined(USE_HELPERS_FOR_INT_DIV) if (!varTypeIsIntegral(divMod->TypeGet())) { assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls"); } assert(varTypeIsFloating(divMod->TypeGet())); #endif // USE_HELPERS_FOR_INT_DIV #if defined(TARGET_ARM64) assert(divMod->OperGet() != GT_UMOD); #endif // TARGET_ARM64 GenTree* dividend = divMod->gtGetOp1(); GenTree* divisor = divMod->gtGetOp2(); #if !defined(TARGET_64BIT) if (dividend->OperIs(GT_LONG)) { return false; } #endif if (!divisor->IsCnsIntOrI()) { return false; } if (dividend->IsCnsIntOrI()) { // We shouldn't see a divmod with constant operands here but if we do then it's likely // because optimizations are disabled or it's a case that's supposed to throw an exception. // Don't optimize this. return false; } const var_types type = divMod->TypeGet(); assert((type == TYP_INT) || (type == TYP_I_IMPL)); size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue()); if (type == TYP_INT) { // Clear up the upper 32 bits of the value, they may be set to 1 because constants // are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets. divisorValue &= UINT32_MAX; } if (divisorValue == 0) { return false; } const bool isDiv = divMod->OperIs(GT_UDIV); if (isPow2(divisorValue)) { genTreeOps newOper; if (isDiv) { newOper = GT_RSZ; divisorValue = genLog2(divisorValue); } else { newOper = GT_AND; divisorValue -= 1; } divMod->SetOper(newOper); divisor->AsIntCon()->SetIconValue(divisorValue); ContainCheckNode(divMod); return true; } if (isDiv) { // If the divisor is greater or equal than 2^(N - 1) then the result is 1 // iff the dividend is greater or equal than the divisor. if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) || ((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2)))) { divMod->SetOper(GT_GE); divMod->gtFlags |= GTF_UNSIGNED; ContainCheckNode(divMod); return true; } } // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) if (!comp->opts.MinOpts() && (divisorValue >= 3)) { size_t magic; bool increment; int preShift; int postShift; bool simpleMul = false; unsigned bits = type == TYP_INT ? 
32 : 64; // if the dividend operand is AND or RSZ with a constant then the number of input bits can be reduced if (dividend->OperIs(GT_AND) && dividend->gtGetOp2()->IsCnsIntOrI()) { size_t maskCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue()); if (maskCns != 0) { unsigned maskBits = 1; while (maskCns >>= 1) maskBits++; if (maskBits < bits) bits = maskBits; } } else if (dividend->OperIs(GT_RSZ) && dividend->gtGetOp2()->IsCnsIntOrI()) { size_t shiftCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue()); if (shiftCns < bits) { bits -= static_cast<unsigned>(shiftCns); } } if (type == TYP_INT) { magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &increment, &preShift, &postShift, bits); #ifdef TARGET_64BIT // avoid inc_saturate/multiple shifts by widening to 32x64 MULHI if (increment || (preShift #ifdef TARGET_XARCH // IMUL reg,reg,imm32 can't be used if magic<0 because of sign-extension && static_cast<int32_t>(magic) < 0 #endif )) { magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift, &postShift, bits); } // otherwise just widen to regular multiplication else { postShift += 32; simpleMul = true; } #endif } else { #ifdef TARGET_64BIT magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift, &postShift, bits); #else unreached(); #endif } assert(divMod->MarkedDivideByConstOptimized()); const bool requiresDividendMultiuse = !isDiv; const weight_t curBBWeight = m_block->getBBWeight(comp); if (requiresDividendMultiuse) { LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod); dividend = ReplaceWithLclVar(dividendUse); } GenTree* firstNode = nullptr; GenTree* adjustedDividend = dividend; #ifdef TARGET_ARM64 // On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one. bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul; #else CLANG_FORMAT_COMMENT_ANCHOR; bool widenToNativeIntForMul = (type != TYP_I_IMPL); #endif // If "increment" flag is returned by GetUnsignedMagic we need to do Saturating Increment first if (increment) { adjustedDividend = comp->gtNewOperNode(GT_INC_SATURATE, type, adjustedDividend); BlockRange().InsertBefore(divMod, adjustedDividend); firstNode = adjustedDividend; assert(!preShift); } // if "preShift" is required, then do a right shift before else if (preShift) { GenTree* preShiftBy = comp->gtNewIconNode(preShift, TYP_INT); adjustedDividend = comp->gtNewOperNode(GT_RSZ, type, adjustedDividend, preShiftBy); BlockRange().InsertBefore(divMod, preShiftBy, adjustedDividend); firstNode = preShiftBy; } else if (widenToNativeIntForMul) { adjustedDividend = comp->gtNewCastNode(TYP_I_IMPL, adjustedDividend, true, TYP_I_IMPL); BlockRange().InsertBefore(divMod, adjustedDividend); firstNode = adjustedDividend; } #ifdef TARGET_XARCH // force input transformation to RAX because the following MULHI will kill RDX:RAX anyway and LSRA often causes // reduntant copies otherwise if (firstNode && !simpleMul) { adjustedDividend->SetRegNum(REG_RAX); } #endif if (widenToNativeIntForMul) { divisor->gtType = TYP_I_IMPL; } divisor->AsIntCon()->SetIconValue(magic); if (isDiv && !postShift && (type == TYP_I_IMPL)) { divMod->SetOper(GT_MULHI); divMod->gtOp1 = adjustedDividend; divMod->SetUnsigned(); } else { #ifdef TARGET_ARM64 // 64-bit MUL is more expensive than UMULL on ARM64. genTreeOps mulOper = simpleMul ? GT_MUL_LONG : GT_MULHI; #else // 64-bit IMUL is less expensive than MUL eax:edx on x64. 
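// A concrete (illustrative) instance of the identity being materialized:
// for a 32-bit unsigned divide by 10, GetUnsigned32Magic returns
// magic = 0xCCCCCCCD with increment = false, preShift = 0 and postShift = 3,
// which the widening path above turns into postShift = 35, so that
//
//     x / 10 == (uint32_t)(((uint64_t)x * 0xCCCCCCCDull) >> 35)
//
// holds for every uint32_t x; the multiply and shift become the
// GT_MUL/GT_MULHI (or GT_MUL_LONG) and GT_RSZ nodes built below.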
genTreeOps mulOper = simpleMul ? GT_MUL : GT_MULHI; #endif // Insert a new multiplication node before the existing GT_UDIV/GT_UMOD node. // The existing node will later be transformed into a GT_RSZ/GT_SUB that // computes the final result. This way don't need to find and change the use // of the existing node. GenTree* mulhi = comp->gtNewOperNode(mulOper, TYP_I_IMPL, adjustedDividend, divisor); mulhi->SetUnsigned(); BlockRange().InsertBefore(divMod, mulhi); if (firstNode == nullptr) { firstNode = mulhi; } if (postShift) { GenTree* shiftBy = comp->gtNewIconNode(postShift, TYP_INT); BlockRange().InsertBefore(divMod, shiftBy); if (isDiv && (type == TYP_I_IMPL)) { divMod->SetOper(GT_RSZ); divMod->gtOp1 = mulhi; divMod->gtOp2 = shiftBy; } else { mulhi = comp->gtNewOperNode(GT_RSZ, TYP_I_IMPL, mulhi, shiftBy); BlockRange().InsertBefore(divMod, mulhi); } } if (!isDiv) { // divisor UMOD dividend = dividend SUB (div MUL divisor) GenTree* divisor = comp->gtNewIconNode(divisorValue, type); GenTree* mul = comp->gtNewOperNode(GT_MUL, type, mulhi, divisor); dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()); divMod->SetOper(GT_SUB); divMod->gtOp1 = dividend; divMod->gtOp2 = mul; BlockRange().InsertBefore(divMod, divisor, mul, dividend); } else if (type != TYP_I_IMPL) { #ifdef TARGET_ARMARCH divMod->SetOper(GT_CAST); divMod->SetUnsigned(); divMod->AsCast()->gtCastType = TYP_INT; #else divMod->SetOper(GT_BITCAST); #endif divMod->gtOp1 = mulhi; divMod->gtOp2 = nullptr; } } if (firstNode != nullptr) { ContainCheckRange(firstNode, divMod); } return true; } #endif return false; } // LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a power of 2 // const divisor into equivalent but faster sequences. // // Arguments: // node - pointer to the DIV or MOD node // // Returns: // nullptr if no transformation is done, or the next node in the transformed node sequence that // needs to be lowered. // GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node) { assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD)); GenTree* divMod = node; GenTree* dividend = divMod->gtGetOp1(); GenTree* divisor = divMod->gtGetOp2(); const var_types type = divMod->TypeGet(); assert((type == TYP_INT) || (type == TYP_LONG)); #if defined(USE_HELPERS_FOR_INT_DIV) assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls"); #endif // USE_HELPERS_FOR_INT_DIV #if defined(TARGET_ARM64) assert(node->OperGet() != GT_MOD); #endif // TARGET_ARM64 if (!divisor->IsCnsIntOrI()) { return nullptr; // no transformations to make } if (dividend->IsCnsIntOrI()) { // We shouldn't see a divmod with constant operands here but if we do then it's likely // because optimizations are disabled or it's a case that's supposed to throw an exception. // Don't optimize this. return nullptr; } ssize_t divisorValue = divisor->AsIntCon()->IconValue(); if (divisorValue == -1 || divisorValue == 0) { // x / 0 and x % 0 can't be optimized because they are required to throw an exception. // x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception. // x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is // the minimum representable integer. However, the C# spec says that an exception "is" thrown in this // case so optimizing this case would break C# code. // A runtime check could be used to handle this case but it's probably too rare to matter. 
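// Concretely: INT32_MIN / -1 would be +2147483648, which is not representable
// in a 32-bit signed integer, so the division must be left to throw at
// runtime; likewise INT64_MIN / -1 for TYP_LONG.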
return nullptr; } bool isDiv = divMod->OperGet() == GT_DIV; if (isDiv) { if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN)) { // If the divisor is the minimum representable integer value then we can use a compare, // the result is 1 iff the dividend equals divisor. divMod->SetOper(GT_EQ); return node; } } size_t absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue)); if (!isPow2(absDivisorValue)) { if (comp->opts.MinOpts()) { return nullptr; } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) ssize_t magic; int shift; if (type == TYP_INT) { magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift); } else { #ifdef TARGET_64BIT magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift); #else // !TARGET_64BIT unreached(); #endif // !TARGET_64BIT } divisor->AsIntConCommon()->SetIconValue(magic); // Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node. // The existing node will later be transformed into a GT_ADD/GT_SUB that // computes the final result. This way don't need to find and change the // use of the existing node. GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend); BlockRange().InsertBefore(divMod, mulhi); // mulhi was the easy part. Now we need to generate different code depending // on the divisor value: // For 3 we need: // div = signbit(mulhi) + mulhi // For 5 we need: // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust // For 7 we need: // mulhi += dividend ; requires add adjust // div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust // For -3 we need: // mulhi -= dividend ; requires sub adjust // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust bool requiresAddSubAdjust = signum(divisorValue) != signum(magic); bool requiresShiftAdjust = shift != 0; bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv; if (requiresDividendMultiuse) { LIR::Use dividendUse(BlockRange(), &mulhi->AsOp()->gtOp2, mulhi); dividend = ReplaceWithLclVar(dividendUse); } GenTree* adjusted; if (requiresAddSubAdjust) { dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()); adjusted = comp->gtNewOperNode(divisorValue > 0 ? 
GT_ADD : GT_SUB, type, mulhi, dividend); BlockRange().InsertBefore(divMod, dividend, adjusted); } else { adjusted = mulhi; } GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type); GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy); BlockRange().InsertBefore(divMod, shiftBy, signBit); LIR::Use adjustedUse(BlockRange(), &signBit->AsOp()->gtOp1, signBit); adjusted = ReplaceWithLclVar(adjustedUse); adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet()); BlockRange().InsertBefore(divMod, adjusted); if (requiresShiftAdjust) { shiftBy = comp->gtNewIconNode(shift, TYP_INT); adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy); BlockRange().InsertBefore(divMod, shiftBy, adjusted); } if (isDiv) { divMod->SetOperRaw(GT_ADD); divMod->AsOp()->gtOp1 = adjusted; divMod->AsOp()->gtOp2 = signBit; } else { GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit); dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()); // divisor % dividend = dividend - divisor x div GenTree* divisor = comp->gtNewIconNode(divisorValue, type); GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor); BlockRange().InsertBefore(divMod, dividend, div, divisor, mul); divMod->SetOperRaw(GT_SUB); divMod->AsOp()->gtOp1 = dividend; divMod->AsOp()->gtOp2 = mul; } return mulhi; #elif defined(TARGET_ARM) // Currently there's no GT_MULHI for ARM32 return nullptr; #else #error Unsupported or unset target architecture #endif } // We're committed to the conversion now. Go find the use if any. LIR::Use use; if (!BlockRange().TryGetUse(node, &use)) { return nullptr; } // We need to use the dividend node multiple times so its value needs to be // computed once and stored in a temp variable. LIR::Use opDividend(BlockRange(), &divMod->AsOp()->gtOp1, divMod); dividend = ReplaceWithLclVar(opDividend); GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63)); if (absDivisorValue == 2) { // If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1. // We can get the same result by using GT_RSZ instead of GT_RSH. 
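// Worked example (illustrative) for x / 2 with signed 32-bit x: the nodes
// built here compute
//
//     t = x + (int32_t)((uint32_t)x >> 31);   // add 1 only when x is negative
//     result = t >> 1;                        // arithmetic shift right
//
// which rounds toward zero, e.g. -7 / 2 == -3 rather than -4.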
adjustment->SetOper(GT_RSZ); } else { adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type)); } GenTree* adjustedDividend = comp->gtNewOperNode(GT_ADD, type, adjustment, comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet())); GenTree* newDivMod; if (isDiv) { // perform the division by right shifting the adjusted dividend divisor->AsIntCon()->SetIconValue(genLog2(absDivisorValue)); newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor); ContainCheckShiftRotate(newDivMod->AsOp()); if (divisorValue < 0) { // negate the result if the divisor is negative newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod); ContainCheckNode(newDivMod); } } else { // divisor % dividend = dividend - divisor x (dividend / divisor) // divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor) // which simply discards the low log2(divisor) bits, that's just dividend & ~(divisor - 1) divisor->AsIntCon()->SetIconValue(~(absDivisorValue - 1)); newDivMod = comp->gtNewOperNode(GT_SUB, type, comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()), comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor)); } // Remove the divisor and dividend nodes from the linear order, // since we have reused them and will resequence the tree BlockRange().Remove(divisor); BlockRange().Remove(dividend); // linearize and insert the new tree before the original divMod node InsertTreeBeforeAndContainCheck(divMod, newDivMod); BlockRange().Remove(divMod); // replace the original divmod node with the new divmod tree use.ReplaceWith(newDivMod); return newDivMod->gtNext; } //------------------------------------------------------------------------ // LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2 // const divisor into equivalent but faster sequences. // // Arguments: // node - the DIV or MOD node // // Returns: // The next node to lower. // GenTree* Lowering::LowerSignedDivOrMod(GenTree* node) { assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD)); GenTree* next = node->gtNext; if (varTypeIsIntegral(node->TypeGet())) { // LowerConstIntDivOrMod will return nullptr if it doesn't transform the node. GenTree* newNode = LowerConstIntDivOrMod(node); if (newNode != nullptr) { return newNode; } } ContainCheckDivOrMod(node->AsOp()); return next; } //------------------------------------------------------------------------ // LowerShift: Lower shift nodes // // Arguments: // shift - the shift node (GT_LSH, GT_RSH or GT_RSZ) // // Notes: // Remove unnecessary shift count masking, xarch shift instructions // mask the shift count to 5 bits (or 6 bits for 64 bit operations). void Lowering::LowerShift(GenTreeOp* shift) { assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ)); size_t mask = 0x1f; #ifdef TARGET_64BIT if (varTypeIsLong(shift->TypeGet())) { mask = 0x3f; } #else assert(!varTypeIsLong(shift->TypeGet())); #endif for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1()) { GenTree* maskOp = andOp->gtGetOp2(); if (!maskOp->IsCnsIntOrI()) { break; } if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask) { break; } shift->gtOp2 = andOp->gtGetOp1(); BlockRange().Remove(andOp); BlockRange().Remove(maskOp); // The parent was replaced, clear contain and regOpt flag. 
shift->gtOp2->ClearContained(); } ContainCheckShiftRotate(shift); #ifdef TARGET_ARM64 // Try to recognize ubfiz/sbfiz idiom in LSH(CAST(X), CNS) tree if (comp->opts.OptimizationEnabled() && shift->OperIs(GT_LSH) && shift->gtGetOp1()->OperIs(GT_CAST) && shift->gtGetOp2()->IsCnsIntOrI() && !shift->isContained()) { GenTreeIntCon* cns = shift->gtGetOp2()->AsIntCon(); GenTreeCast* cast = shift->gtGetOp1()->AsCast(); if (!cast->isContained() && !cast->IsRegOptional() && !cast->gtOverflow() && // Smaller CastOp is most likely an IND(X) node which is lowered to a zero-extend load cast->CastOp()->TypeIs(TYP_LONG, TYP_INT)) { // Cast is either "TYP_LONG <- TYP_INT" or "TYP_INT <- %SMALL_INT% <- TYP_INT" (signed or unsigned) unsigned dstBits = genTypeSize(cast) * BITS_PER_BYTE; unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE : genTypeSize(cast->CastOp()) * BITS_PER_BYTE; assert(!cast->CastOp()->isContained()); // It has to be an upcast and CNS must be in [1..srcBits) range if ((srcBits < dstBits) && (cns->IconValue() > 0) && (cns->IconValue() < srcBits)) { JITDUMP("Recognized ubfix/sbfix pattern in LSH(CAST, CNS). Changing op to GT_BFIZ"); shift->ChangeOper(GT_BFIZ); MakeSrcContained(shift, cast); } } } #endif } void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node) { #ifdef FEATURE_SIMD if (node->TypeGet() == TYP_SIMD12) { // Assumption 1: // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for // reading and writing purposes. // // Assumption 2: // RyuJit backend is making another implicit assumption that Vector3 type args when passed in // registers or on stack, the upper most 4-bytes will be zero. // // For P/Invoke return and Reverse P/Invoke argument passing, native compiler doesn't guarantee // that upper 4-bytes of a Vector3 type struct is zero initialized and hence assumption 2 is // invalid. // // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12 // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and // passes it retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason, // there is no need to clear upper 4-bytes of Vector3 type args. // // RyuJIT x64 Unix: arguments are treated as passed by value and read/writen as if TYP_SIMD16. // Vector3 return values are returned two return registers and Caller assembles them into a // single xmm reg. Hence RyuJIT explicitly generates code to clears upper 4-bytes of Vector3 // type args in prolog and Vector3 type return value of a call // // RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments // are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed // as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear // it either. LclVarDsc* varDsc = comp->lvaGetDesc(node->AsLclVarCommon()); if (comp->lvaMapSimd12ToSimd16(varDsc)) { JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n"); DISPNODE(node); JITDUMP("============"); node->gtType = TYP_SIMD16; } } #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // LowerArrElem: Lower a GT_ARR_ELEM node // // Arguments: // node - the GT_ARR_ELEM node to lower. // // Return Value: // The next node to lower. 
// // Assumptions: // pTree points to a pointer to a GT_ARR_ELEM node. // // Notes: // This performs the following lowering. We start with a node of the form: // /--* <arrObj> // +--* <index0> // +--* <index1> // /--* arrMD&[,] // // First, we create temps for arrObj if it is not already a lclVar, and for any of the index // expressions that have side-effects. // We then transform the tree into: // <offset is null - no accumulated offset for the first index> // /--* <arrObj> // +--* <index0> // /--* ArrIndex[i, ] // +--* <arrObj> // /--| arrOffs[i, ] // | +--* <arrObj> // | +--* <index1> // +--* ArrIndex[*,j] // +--* <arrObj> // /--| arrOffs[*,j] // +--* lclVar NewTemp // /--* lea (scale = element size, offset = offset of first element) // // The new stmtExpr may be omitted if the <arrObj> is a lclVar. // The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for // the statement containing the original arrMD. // Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second // reference to NewTemp), because that provides more accurate lifetimes. // There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively. // GenTree* Lowering::LowerArrElem(GenTree* node) { // This will assert if we don't have an ArrElem node GenTreeArrElem* arrElem = node->AsArrElem(); const unsigned char rank = arrElem->gtArrRank; JITDUMP("Lowering ArrElem\n"); JITDUMP("============\n"); DISPTREERANGE(BlockRange(), arrElem); JITDUMP("\n"); assert(arrElem->gtArrObj->TypeGet() == TYP_REF); // We need to have the array object in a lclVar. if (!arrElem->gtArrObj->IsLocal()) { LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem); ReplaceWithLclVar(arrObjUse); } GenTree* arrObjNode = arrElem->gtArrObj; assert(arrObjNode->IsLocal()); GenTree* insertionPoint = arrElem; // The first ArrOffs node will have 0 for the offset of the previous dimension. GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0); BlockRange().InsertBefore(insertionPoint, prevArrOffs); GenTree* nextToLower = prevArrOffs; for (unsigned char dim = 0; dim < rank; dim++) { GenTree* indexNode = arrElem->gtArrInds[dim]; // Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones. GenTree* idxArrObjNode; if (dim == 0) { idxArrObjNode = arrObjNode; } else { idxArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, idxArrObjNode); } // Next comes the GT_ARR_INDEX node. GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX) GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElemType); arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrMDIdx); GenTree* offsArrObjNode = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, offsArrObjNode); GenTreeArrOffs* arrOffs = new (comp, GT_ARR_OFFSET) GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank, arrElem->gtArrElemType); arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT); BlockRange().InsertBefore(insertionPoint, arrOffs); prevArrOffs = arrOffs; } // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the // base. 
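// A small illustration (values assumed): for a rank-2 int[,] the element size
// is 4 and eeGetMDArrayDataOffset(2) is the offset of the first element past
// the bounds data, so the final address is
//
//     LEA(base = arrObj, index = totalOffset, scale = 4, offset = dataOffset)
//
// When the element size is not an encodable scale (e.g. a 12-byte struct),
// the code below pre-multiplies with an explicit GT_MUL and uses scale = 1.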
unsigned scale = arrElem->gtArrElemSize; unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrRank); GenTree* leaIndexNode = prevArrOffs; if (!jitIsScaleIndexMul(scale)) { // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are // TYP_INT GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale); GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode); BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode); leaIndexNode = mulNode; scale = 1; } GenTree* leaBase = comp->gtClone(arrObjNode); BlockRange().InsertBefore(insertionPoint, leaBase); GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset); BlockRange().InsertBefore(insertionPoint, leaNode); LIR::Use arrElemUse; if (BlockRange().TryGetUse(arrElem, &arrElemUse)) { arrElemUse.ReplaceWith(leaNode); } else { leaNode->SetUnusedValue(); } BlockRange().Remove(arrElem); JITDUMP("Results of lowering ArrElem:\n"); DISPTREERANGE(BlockRange(), leaNode); JITDUMP("\n\n"); return nextToLower; } PhaseStatus Lowering::DoPhase() { // If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the // appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke // data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination. if (comp->compMethodRequiresPInvokeFrame()) { InsertPInvokeMethodProlog(); } #if !defined(TARGET_64BIT) DecomposeLongs decomp(comp); // Initialize the long decomposition class. if (comp->compLongUsed) { decomp.PrepareForDecomposition(); } #endif // !defined(TARGET_64BIT) if (!comp->compEnregLocals()) { // Lowering is checking if lvDoNotEnregister is already set for contained optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // `lvDoNotEnregister` flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. comp->lvSetMinOptsDoNotEnreg(); } for (BasicBlock* const block : comp->Blocks()) { /* Make the block publicly available */ comp->compCurBB = block; #if !defined(TARGET_64BIT) if (comp->compLongUsed) { decomp.DecomposeBlock(block); } #endif //! TARGET_64BIT LowerBlock(block); } #ifdef DEBUG JITDUMP("Lower has completed modifying nodes.\n"); if (VERBOSE) { comp->fgDispBasicBlocks(true); } #endif // Recompute local var ref counts before potentially sorting for liveness. // Note this does minimal work in cases where we are not going to sort. const bool isRecompute = true; const bool setSlotNumbers = false; comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); comp->fgLocalVarLiveness(); // local var liveness can delete code, which may create empty blocks if (comp->opts.OptimizationEnabled()) { comp->optLoopsMarked = false; bool modified = comp->fgUpdateFlowGraph(); if (modified) { JITDUMP("had to run another liveness pass:\n"); comp->fgLocalVarLiveness(); } } // Recompute local var ref counts again after liveness to reflect // impact of any dead code removal. Note this may leave us with // tracked vars that have zero refs. 
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers); return PhaseStatus::MODIFIED_EVERYTHING; } #ifdef DEBUG //------------------------------------------------------------------------ // Lowering::CheckCallArg: check that a call argument is in an expected // form after lowering. // // Arguments: // arg - the argument to check. // void Lowering::CheckCallArg(GenTree* arg) { if (!arg->IsValue() && !arg->OperIsPutArgStk()) { assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() || arg->OperIsCopyBlkOp()); return; } switch (arg->OperGet()) { case GT_FIELD_LIST: { GenTreeFieldList* list = arg->AsFieldList(); assert(list->isContained()); for (GenTreeFieldList::Use& use : list->Uses()) { assert(use.GetNode()->OperIsPutArg()); } } break; default: assert(arg->OperIsPutArg()); break; } } //------------------------------------------------------------------------ // Lowering::CheckCall: check that a call is in an expected form after // lowering. Currently this amounts to checking its // arguments, but could be expanded to verify more // properties in the future. // // Arguments: // call - the call to check. // void Lowering::CheckCall(GenTreeCall* call) { if (call->gtCallThisArg != nullptr) { CheckCallArg(call->gtCallThisArg->GetNode()); } for (GenTreeCall::Use& use : call->Args()) { CheckCallArg(use.GetNode()); } for (GenTreeCall::Use& use : call->LateArgs()) { CheckCallArg(use.GetNode()); } } //------------------------------------------------------------------------ // Lowering::CheckNode: check that an LIR node is in an expected form // after lowering. // // Arguments: // compiler - the compiler context. // node - the node to check. // void Lowering::CheckNode(Compiler* compiler, GenTree* node) { switch (node->OperGet()) { case GT_CALL: CheckCall(node->AsCall()); break; #ifdef FEATURE_SIMD case GT_SIMD: case GT_HWINTRINSIC: assert(node->TypeGet() != TYP_SIMD12); break; #endif // FEATURE_SIMD case GT_LCL_VAR: case GT_STORE_LCL_VAR: { const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar()); #if defined(FEATURE_SIMD) && defined(TARGET_64BIT) if (node->TypeIs(TYP_SIMD12)) { assert(compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc) || (varDsc->lvSize() == 12)); } #endif // FEATURE_SIMD && TARGET_64BIT if (varDsc->lvPromoted) { assert(varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegRet); } } break; case GT_LCL_VAR_ADDR: case GT_LCL_FLD_ADDR: { const GenTreeLclVarCommon* lclVarAddr = node->AsLclVarCommon(); const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarAddr); if (((lclVarAddr->gtFlags & GTF_VAR_DEF) != 0) && varDsc->HasGCPtr()) { // Emitter does not correctly handle live updates for LCL_VAR_ADDR // when they are not contained, for example, `STOREIND byref(GT_LCL_VAR_ADDR not-contained)` // would generate: // add r1, sp, 48 // r1 contains address of a lclVar V01. // str r0, [r1] // a gc ref becomes live in V01, but emitter would not report it. // Make sure that we use uncontained address nodes only for variables // that will be marked as mustInit and will be alive throughout the whole block even when tracked. assert(lclVarAddr->isContained() || !varDsc->lvTracked || varTypeIsStruct(varDsc)); // TODO: support this assert for uses, see https://github.com/dotnet/runtime/issues/51900. 
} assert(varDsc->lvDoNotEnregister); break; } case GT_PHI: case GT_PHI_ARG: assert(!"Should not see phi nodes after rationalize"); break; case GT_LCL_FLD: case GT_STORE_LCL_FLD: { const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclFld()); assert(varDsc->lvDoNotEnregister); } break; default: break; } } //------------------------------------------------------------------------ // Lowering::CheckBlock: check that the contents of an LIR block are in an // expected form after lowering. // // Arguments: // compiler - the compiler context. // block - the block to check. // bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block) { assert(block->isEmpty() || block->IsLIR()); LIR::Range& blockRange = LIR::AsRange(block); for (GenTree* node : blockRange) { CheckNode(compiler, node); } assert(blockRange.CheckLIR(compiler, true)); return true; } #endif //------------------------------------------------------------------------ // Lowering::LowerBlock: Lower all the nodes in a BasicBlock // // Arguments: // block - the block to lower. // void Lowering::LowerBlock(BasicBlock* block) { assert(block == comp->compCurBB); // compCurBB must already be set. assert(block->isEmpty() || block->IsLIR()); m_block = block; // NOTE: some of the lowering methods insert calls before the node being // lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In // general, any code that is inserted before the current node should be // "pre-lowered" as they won't be subject to further processing. // Lowering::CheckBlock() runs some extra checks on call arguments in // order to help catch unlowered nodes. GenTree* node = BlockRange().FirstNode(); while (node != nullptr) { node = LowerNode(node); } assert(CheckBlock(comp, block)); } /** Verifies if both of these trees represent the same indirection. * Used by Lower to annotate if CodeGen generate an instruction of the * form *addrMode BinOp= expr * * Preconditions: both trees are children of GT_INDs and their underlying children * have the same gtOper. * * This is a first iteration to actually recognize trees that can be code-generated * as a single read-modify-write instruction on AMD64/x86. For now * this method only supports the recognition of simple addressing modes (through GT_LEA) * or local var indirections. Local fields, array access and other more complex nodes are * not yet supported. * * TODO-CQ: Perform tree recognition by using the Value Numbering Package, that way we can recognize * arbitrary complex trees and support much more addressing patterns. */ bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd) { assert(candidate->OperGet() == GT_IND); assert(storeInd->OperGet() == GT_STOREIND); // We should check the size of the indirections. If they are // different, say because of a cast, then we can't call them equivalent. Doing so could cause us // to drop a cast. // Signed-ness difference is okay and expected since a store indirection must always // be signed based on the CIL spec, but a load could be unsigned. if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType)) { return false; } GenTree* pTreeA = candidate->gtGetOp1(); GenTree* pTreeB = storeInd->gtGetOp1(); // This method will be called by codegen (as well as during lowering). // After register allocation, the sources may have been spilled and reloaded // to a different register, indicated by an inserted GT_RELOAD node. 
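// An illustrative shape this equivalence check enables (x86/x64 RMW form):
//
//     STOREIND(LEA(b, i, 4, 0), ADD(IND(LEA(b, i, 4, 0)), CNS_INT 1))
//
// When the two LEAs compare equal under the rules below, codegen can emit a
// single read-modify-write instruction such as "add dword ptr [b+i*4], 1".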
pTreeA = pTreeA->gtSkipReloadOrCopy(); pTreeB = pTreeB->gtSkipReloadOrCopy(); genTreeOps oper; if (pTreeA->OperGet() != pTreeB->OperGet()) { return false; } oper = pTreeA->OperGet(); switch (oper) { case GT_LCL_VAR: case GT_LCL_VAR_ADDR: case GT_CLS_VAR_ADDR: case GT_CNS_INT: return NodesAreEquivalentLeaves(pTreeA, pTreeB); case GT_LEA: { GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode(); GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode(); return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) && NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) && (gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset()); } default: // We don't handle anything that is not either a constant, // a local var or LEA. return false; } } //------------------------------------------------------------------------ // NodesAreEquivalentLeaves: Check whether the two given nodes are the same leaves. // // Arguments: // tree1 and tree2 are nodes to be checked. // Return Value: // Returns true if they are same leaves, false otherwise. // // static bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2) { if (tree1 == tree2) { return true; } if (tree1 == nullptr || tree2 == nullptr) { return false; } tree1 = tree1->gtSkipReloadOrCopy(); tree2 = tree2->gtSkipReloadOrCopy(); if (tree1->TypeGet() != tree2->TypeGet()) { return false; } if (tree1->OperGet() != tree2->OperGet()) { return false; } if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf()) { return false; } switch (tree1->OperGet()) { case GT_CNS_INT: return tree1->AsIntCon()->IconValue() == tree2->AsIntCon()->IconValue() && tree1->IsIconHandle() == tree2->IsIconHandle(); case GT_LCL_VAR: case GT_LCL_VAR_ADDR: return tree1->AsLclVarCommon()->GetLclNum() == tree2->AsLclVarCommon()->GetLclNum(); case GT_CLS_VAR_ADDR: return tree1->AsClsVar()->gtClsVarHnd == tree2->AsClsVar()->gtClsVarHnd; default: return false; } } //------------------------------------------------------------------------ // Lowering::CheckMultiRegLclVar: Check whether a MultiReg GT_LCL_VAR node can // remain a multi-reg. // // Arguments: // lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR node. // retTypeDesc - a return type descriptor either for a call source of a store of // the local, or for the GT_RETURN consumer of the local. // // Notes: // If retTypeDesc is non-null, this method will check that the fields are compatible. // Otherwise, it will only check that the lclVar is independently promoted // (i.e. it is marked lvPromoted and not lvDoNotEnregister). // bool Lowering::CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc) { bool canEnregister = false; #if FEATURE_MULTIREG_RET LclVarDsc* varDsc = comp->lvaGetDesc(lclNode->GetLclNum()); if ((comp->lvaEnregMultiRegVars) && varDsc->lvPromoted) { // We can enregister if we have a promoted struct and all the fields' types match the ABI requirements. // Note that we don't promote structs with explicit layout, so we don't need to check field offsets, and // if we have multiple types packed into a single register, we won't have matching reg and field counts, // so we can tolerate mismatches of integer size. if (varDsc->lvPromoted && (comp->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // If we have no retTypeDesc, we only care that it is independently promoted. 
if (retTypeDesc == nullptr) { canEnregister = true; } else { unsigned regCount = retTypeDesc->GetReturnRegCount(); if (regCount == varDsc->lvFieldCnt) { canEnregister = true; } } } } #ifdef TARGET_XARCH // For local stores on XARCH we only handle mismatched src/dest register count for // calls of SIMD type. If the source was another lclVar similarly promoted, we would // have broken it into multiple stores. if (lclNode->OperIs(GT_STORE_LCL_VAR) && !lclNode->gtGetOp1()->OperIs(GT_CALL)) { canEnregister = false; } #endif // TARGET_XARCH if (canEnregister) { lclNode->SetMultiReg(); } else { lclNode->ClearMultiReg(); if (varDsc->lvPromoted && !varDsc->lvDoNotEnregister) { comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp)); } } #endif return canEnregister; } //------------------------------------------------------------------------ // Containment Analysis //------------------------------------------------------------------------ void Lowering::ContainCheckNode(GenTree* node) { switch (node->gtOper) { case GT_STORE_LCL_VAR: case GT_STORE_LCL_FLD: ContainCheckStoreLoc(node->AsLclVarCommon()); break; case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT: case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP: case GT_JCMP: ContainCheckCompare(node->AsOp()); break; case GT_JTRUE: ContainCheckJTrue(node->AsOp()); break; case GT_ADD: case GT_SUB: #if !defined(TARGET_64BIT) case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI: #endif case GT_AND: case GT_OR: case GT_XOR: ContainCheckBinary(node->AsOp()); break; #if defined(TARGET_X86) case GT_MUL_LONG: #endif case GT_MUL: case GT_MULHI: ContainCheckMul(node->AsOp()); break; case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD: ContainCheckDivOrMod(node->AsOp()); break; case GT_LSH: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR: #ifndef TARGET_64BIT case GT_LSH_HI: case GT_RSH_LO: #endif ContainCheckShiftRotate(node->AsOp()); break; case GT_ARR_OFFSET: ContainCheckArrOffset(node->AsArrOffs()); break; case GT_LCLHEAP: ContainCheckLclHeap(node->AsOp()); break; case GT_RETURN: ContainCheckRet(node->AsOp()); break; case GT_RETURNTRAP: ContainCheckReturnTrap(node->AsOp()); break; case GT_STOREIND: ContainCheckStoreIndir(node->AsStoreInd()); break; case GT_IND: ContainCheckIndir(node->AsIndir()); break; case GT_PUTARG_REG: case GT_PUTARG_STK: #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT // The regNum must have been set by the lowering of the call. assert(node->GetRegNum() != REG_NA); break; #ifdef TARGET_XARCH case GT_INTRINSIC: ContainCheckIntrinsic(node->AsOp()); break; #endif // TARGET_XARCH #ifdef FEATURE_SIMD case GT_SIMD: ContainCheckSIMD(node->AsSIMD()); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: ContainCheckHWIntrinsic(node->AsHWIntrinsic()); break; #endif // FEATURE_HW_INTRINSICS default: break; } } //------------------------------------------------------------------------ // ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained. 
// // Arguments: // node - pointer to the GT_RETURNTRAP node // void Lowering::ContainCheckReturnTrap(GenTreeOp* node) { #ifdef TARGET_XARCH assert(node->OperIs(GT_RETURNTRAP)); // This just turns into a compare of its child with an int + a conditional call if (node->gtOp1->isIndir()) { MakeSrcContained(node, node->gtOp1); } #endif // TARGET_XARCH } //------------------------------------------------------------------------ // ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained. // // Arguments: // node - pointer to the GT_ARR_OFFSET node // void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node) { assert(node->OperIs(GT_ARR_OFFSET)); // we don't want to generate code for this if (node->gtOffset->IsIntegralConst(0)) { MakeSrcContained(node, node->AsArrOffs()->gtOffset); } } //------------------------------------------------------------------------ // ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckLclHeap(GenTreeOp* node) { assert(node->OperIs(GT_LCLHEAP)); GenTree* size = node->AsOp()->gtOp1; if (size->IsCnsIntOrI()) { MakeSrcContained(node, size); } } //------------------------------------------------------------------------ // ContainCheckRet: determine whether the source of a node should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckRet(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); #if !defined(TARGET_64BIT) if (ret->TypeGet() == TYP_LONG) { GenTree* op1 = ret->gtGetOp1(); noway_assert(op1->OperGet() == GT_LONG); MakeSrcContained(ret, op1); } #endif // !defined(TARGET_64BIT) #if FEATURE_MULTIREG_RET if (ret->TypeIs(TYP_STRUCT)) { GenTree* op1 = ret->gtGetOp1(); // op1 must be either a lclvar or a multi-reg returning call if (op1->OperGet() == GT_LCL_VAR) { const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVarCommon()); // This must be a multi-reg return or an HFA of a single element. assert(varDsc->lvIsMultiRegRet || (varDsc->lvIsHfa() && varTypeIsValidHfaType(varDsc->lvType))); // Mark var as contained if not enregisterable. if (!varDsc->IsEnregisterableLcl()) { if (!op1->IsMultiRegLclVar()) { MakeSrcContained(ret, op1); } } } } #endif // FEATURE_MULTIREG_RET } //------------------------------------------------------------------------ // ContainCheckJTrue: determine whether the source of a JTRUE should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckJTrue(GenTreeOp* node) { // The compare does not need to be generated into a register. GenTree* cmp = node->gtGetOp1(); cmp->gtType = TYP_VOID; cmp->gtFlags |= GTF_SET_FLAGS; } //------------------------------------------------------------------------ // ContainCheckBitCast: determine whether the source of a BITCAST should be contained. // // Arguments: // node - pointer to the node // void Lowering::ContainCheckBitCast(GenTree* node) { GenTree* const op1 = node->AsOp()->gtOp1; if (op1->isMemoryOp()) { op1->SetContained(); } else if (op1->OperIs(GT_LCL_VAR)) { if (!m_lsra->willEnregisterLocalVars()) { op1->SetContained(); } const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVar()); // TODO-Cleanup: we want to check if the local is already known not // to be on reg, for example, because local enreg is disabled. 
if (varDsc->lvDoNotEnregister) { op1->SetContained(); } else { op1->SetRegOptional(); } } else if (op1->IsLocal()) { op1->SetContained(); } } //------------------------------------------------------------------------ // LowerStoreIndirCommon: a common logic to lower StoreIndir. // // Arguments: // ind - the store indirection node we are lowering. // void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind) { assert(ind->TypeGet() != TYP_STRUCT); #if defined(TARGET_ARM64) // Verify containment safety before creating an LEA that must be contained. // const bool isContainable = IsSafeToContainMem(ind, ind->Addr()); #else const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(ind)) { if (varTypeIsFloating(ind) && ind->Data()->IsCnsFltOrDbl()) { // Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller. GenTree* data = ind->Data(); double dblCns = data->AsDblCon()->gtDconVal; ssize_t intCns = 0; var_types type = TYP_UNKNOWN; // XARCH: we can always contain the immediates. // ARM64: zero can always be contained, other cases will use immediates from the data // section and it is not a clear win to switch them to inline integers. // ARM: FP constants are assembled from integral ones, so it is always profitable // to directly use the integers as it avoids the int -> float conversion. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_XARCH) || defined(TARGET_ARM) bool shouldSwitchToInteger = true; #else // TARGET_ARM64 bool shouldSwitchToInteger = !data->IsCnsNonZeroFltOrDbl(); #endif if (shouldSwitchToInteger) { if (ind->TypeIs(TYP_FLOAT)) { float fltCns = static_cast<float>(dblCns); // should be a safe round-trip intCns = static_cast<ssize_t>(*reinterpret_cast<INT32*>(&fltCns)); type = TYP_INT; } #ifdef TARGET_64BIT else { assert(ind->TypeIs(TYP_DOUBLE)); intCns = static_cast<ssize_t>(*reinterpret_cast<INT64*>(&dblCns)); type = TYP_LONG; } #endif } if (type != TYP_UNKNOWN) { data->BashToConst(intCns, type); ind->ChangeType(type); } } LowerStoreIndir(ind); } } //------------------------------------------------------------------------ // LowerIndir: a common logic to lower IND load or NullCheck. // // Arguments: // ind - the ind node we are lowering. // void Lowering::LowerIndir(GenTreeIndir* ind) { assert(ind->OperIs(GT_IND, GT_NULLCHECK)); // Process struct typed indirs separately unless they are unused; // they only appear as the source of a block copy operation or a return node. if (!ind->TypeIs(TYP_STRUCT) || ind->IsUnusedValue()) { // TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects // address containment in some cases so we end up creating trivial (reg + offfset) // or (reg + reg) LEAs that are not necessary. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM64) // Verify containment safety before creating an LEA that must be contained. // const bool isContainable = IsSafeToContainMem(ind, ind->Addr()); #else const bool isContainable = true; #endif TryCreateAddrMode(ind->Addr(), isContainable, ind); ContainCheckIndir(ind); if (ind->OperIs(GT_NULLCHECK) || ind->IsUnusedValue()) { TransformUnusedIndirection(ind, comp, m_block); } } else { // If the `ADDR` node under `STORE_OBJ(dstAddr, IND(struct(ADDR))` // is a complex one it could benefit from an `LEA` that is not contained. 
const bool isContainable = false; TryCreateAddrMode(ind->Addr(), isContainable, ind); } } //------------------------------------------------------------------------ // TransformUnusedIndirection: change the opcode and the type of the unused indirection. // // Arguments: // ind - Indirection to transform. // comp - Compiler instance. // block - Basic block of the indirection. // void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block) { // A nullcheck is essentially the same as an indirection with no use. // The difference lies in whether a target register must be allocated. // On XARCH we can generate a compare with no target register as long as the address // is not contained. // On ARM64 we can generate a load to REG_ZR in all cases. // However, on ARM we must always generate a load to a register. // In the case where we require a target register, it is better to use GT_IND, since // GT_NULLCHECK is a non-value node and would therefore require an internal register // to use as the target. That is non-optimal because it will be modeled as conflicting // with the source register(s). // So, to summarize: // - On ARM64, always use GT_NULLCHECK for a dead indirection. // - On ARM, always use GT_IND. // - On XARCH, use GT_IND if we have a contained address, and GT_NULLCHECK otherwise. // In all cases we try to preserve the original type and never make it wider to avoid AVEs. // For structs we conservatively lower it to BYTE. For 8-byte primitives we lower it to TYP_INT // on XARCH as an optimization. // assert(ind->OperIs(GT_NULLCHECK, GT_IND, GT_BLK, GT_OBJ)); ind->ChangeType(comp->gtTypeForNullCheck(ind)); #ifdef TARGET_ARM64 bool useNullCheck = true; #elif TARGET_ARM bool useNullCheck = false; #else // TARGET_XARCH bool useNullCheck = !ind->Addr()->isContained(); #endif // !TARGET_XARCH if (useNullCheck && !ind->OperIs(GT_NULLCHECK)) { comp->gtChangeOperToNullCheck(ind, block); ind->ClearUnusedValue(); } else if (!useNullCheck && !ind->OperIs(GT_IND)) { ind->ChangeOper(GT_IND); ind->SetUnusedValue(); } } //------------------------------------------------------------------------ // LowerBlockStoreCommon: a common logic to lower STORE_OBJ/BLK/DYN_BLK. // // Arguments: // blkNode - the store blk/obj node we are lowering. // void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode) { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ)); // Lose the type information stored in the source - we no longer need it. if (blkNode->Data()->OperIs(GT_OBJ, GT_BLK)) { blkNode->Data()->SetOper(GT_IND); LowerIndir(blkNode->Data()->AsIndir()); } if (TryTransformStoreObjAsStoreInd(blkNode)) { return; } LowerBlockStore(blkNode); } //------------------------------------------------------------------------ // TryTransformStoreObjAsStoreInd: try to replace STORE_OBJ/BLK as STOREIND. // // Arguments: // blkNode - the store node. // // Return value: // true if the replacement was made, false otherwise. // // Notes: // TODO-CQ: this method should do the transformation when possible // and STOREIND should always generate better or the same code as // STORE_OBJ/BLK for the same copy. 
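// An illustrative instance of the rewrite (layout assumed): copying a 4-byte
// struct whose ClassLayout reports a register type of TYP_INT,
//
//     STORE_OBJ(dstAddr, OBJ(srcAddr))  ->  STOREIND(int dstAddr, IND(int srcAddr))
//
// turns the block copy into a plain 4-byte store that later containment can
// treat like any other indirection.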
// bool Lowering::TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode) { assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ)); if (!comp->opts.OptimizationEnabled()) { return false; } if (blkNode->OperIs(GT_STORE_DYN_BLK)) { return false; } ClassLayout* layout = blkNode->GetLayout(); if (layout == nullptr) { return false; } var_types regType = layout->GetRegisterType(); if (regType == TYP_UNDEF) { return false; } GenTree* src = blkNode->Data(); if (varTypeIsSIMD(regType) && src->IsConstInitVal()) { // TODO-CQ: support STORE_IND SIMD16(SIMD16, CNT_INT 0). return false; } if (varTypeIsGC(regType)) { // TODO-CQ: STOREIND does not try to contain src if we need a barrier, // STORE_OBJ generates better code currently. return false; } if (src->OperIsInitVal() && !src->IsConstInitVal()) { return false; } if (varTypeIsSmall(regType) && !src->IsConstInitVal() && !src->IsLocal()) { // source operand INDIR will use a widening instruction // and generate worse code, like `movzx` instead of `mov` // on x64. return false; } JITDUMP("Replacing STORE_OBJ with STOREIND for [%06u]\n", blkNode->gtTreeID); blkNode->ChangeOper(GT_STOREIND); blkNode->ChangeType(regType); if ((blkNode->gtFlags & GTF_IND_TGT_NOT_HEAP) == 0) { blkNode->gtFlags |= GTF_IND_TGTANYWHERE; } if (varTypeIsStruct(src)) { src->ChangeType(regType); LowerNode(blkNode->Data()); } else if (src->OperIsInitVal()) { GenTreeUnOp* initVal = src->AsUnOp(); src = src->gtGetOp1(); assert(src->IsCnsIntOrI()); src->AsIntCon()->FixupInitBlkValue(regType); blkNode->SetData(src); BlockRange().Remove(initVal); } else { assert(src->TypeIs(regType) || src->IsCnsIntOrI() || src->IsCall()); } LowerStoreIndirCommon(blkNode->AsStoreInd()); return true; }
1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call would overwrite stack space that is passed to the callee.
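For intuition, a minimal self-contained sketch of the three guards these bullets describe; every type and member name below is a hypothetical placeholder, not an identifier from the actual change:

#include <vector>

// Hypothetical stand-ins for the JIT-side argument data (names assumed).
struct ArgInfo
{
    bool isSplit; // struct split between registers and stack (ARM32 only)
};

struct CallSiteInfo
{
    std::vector<ArgInfo> args;
    bool nonStandardCallConv;    // callee uses a non-standard convention
    bool overwritesIncomingArgs; // outgoing args would clobber incoming area
};

// Any one of the three conditions above forces the regular (helper) tail call.
bool CanUseFastTailCall(const CallSiteInfo& call)
{
    for (const ArgInfo& arg : call.args)
    {
        if (arg.isSplit)
        {
            return false;
        }
    }
    return !call.nonStandardCallConv && !call.overwritesIncomingArgs;
}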
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call would overwrite stack space that is passed to the callee.
./src/coreclr/jit/lsraarmarch.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Register Requirements for ARM and ARM64 common code XX XX XX XX This encapsulates common logic for setting register requirements for XX XX the ARM and ARM64 architectures. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures #include "jit.h" #include "sideeffects.h" #include "lower.h" #include "lsra.h" //------------------------------------------------------------------------ // BuildIndir: Specify register requirements for address expression // of an indirection operation. // // Arguments: // indirTree - GT_IND, GT_STOREIND or block gentree node // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildIndir(GenTreeIndir* indirTree) { // struct typed indirs are expected only on rhs of a block copy, // but in this case they must be contained. assert(indirTree->TypeGet() != TYP_STRUCT); GenTree* addr = indirTree->Addr(); GenTree* index = nullptr; int cns = 0; #ifdef TARGET_ARM // Unaligned loads/stores for floating point values must first be loaded into integer register(s) if (indirTree->gtFlags & GTF_IND_UNALIGNED) { var_types type = TYP_UNDEF; if (indirTree->OperGet() == GT_STOREIND) { type = indirTree->AsStoreInd()->Data()->TypeGet(); } else if (indirTree->OperGet() == GT_IND) { type = indirTree->TypeGet(); } if (type == TYP_FLOAT) { buildInternalIntRegisterDefForNode(indirTree); } else if (type == TYP_DOUBLE) { buildInternalIntRegisterDefForNode(indirTree); buildInternalIntRegisterDefForNode(indirTree); } } #endif if (addr->isContained()) { if (addr->OperGet() == GT_LEA) { GenTreeAddrMode* lea = addr->AsAddrMode(); index = lea->Index(); cns = lea->Offset(); // On ARM we may need a single internal register // (when both conditions are true then we still only need a single internal register) if ((index != nullptr) && (cns != 0)) { // ARM does not support both Index and offset so we need an internal register buildInternalIntRegisterDefForNode(indirTree); } else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree))) { // This offset can't be contained in the ldr/str instruction, so we need an internal register buildInternalIntRegisterDefForNode(indirTree); } } #ifdef TARGET_ARM64 else if (addr->OperGet() == GT_CLS_VAR_ADDR) { // Reserve int to load constant from memory (IF_LARGELDC) buildInternalIntRegisterDefForNode(indirTree); } #endif // TARGET_ARM64 } #ifdef FEATURE_SIMD if (indirTree->TypeGet() == TYP_SIMD12) { // If indirTree is of TYP_SIMD12, addr is not contained. See comment in LowerIndir(). assert(!addr->isContained()); // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. 
// To assemble the vector properly we would need an additional int register buildInternalIntRegisterDefForNode(indirTree); } #endif // FEATURE_SIMD int srcCount = BuildIndirUses(indirTree); buildInternalRegisterUses(); if (!indirTree->OperIs(GT_STOREIND, GT_NULLCHECK)) { BuildDef(indirTree); } return srcCount; } //------------------------------------------------------------------------ // BuildCall: Set the NodeInfo for a call. // // Arguments: // call - The call node of interest // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildCall(GenTreeCall* call) { bool hasMultiRegRetVal = false; const ReturnTypeDesc* retTypeDesc = nullptr; regMaskTP dstCandidates = RBM_NONE; int srcCount = 0; int dstCount = 0; if (call->TypeGet() != TYP_VOID) { hasMultiRegRetVal = call->HasMultiRegRetVal(); if (hasMultiRegRetVal) { // dst count = number of registers in which the value is returned by call retTypeDesc = call->GetReturnTypeDesc(); dstCount = retTypeDesc->GetReturnRegCount(); } else { dstCount = 1; } } GenTree* ctrlExpr = call->gtControlExpr; regMaskTP ctrlExprCandidates = RBM_NONE; if (call->gtCallType == CT_INDIRECT) { // either gtControlExpr != null or gtCallAddr != null. // Both cannot be non-null at the same time. assert(ctrlExpr == nullptr); assert(call->gtCallAddr != nullptr); ctrlExpr = call->gtCallAddr; } // set reg requirements on call target represented as control sequence. if (ctrlExpr != nullptr) { // we should never see a gtControlExpr whose type is void. assert(ctrlExpr->TypeGet() != TYP_VOID); // In case of fast tail implemented as jmp, make sure that gtControlExpr is // computed into a register. if (call->IsFastTailCall()) { // Fast tail call - make sure that call target is always computed in volatile registers // that will not be overridden by epilog sequence. ctrlExprCandidates = allRegs(TYP_INT) & RBM_INT_CALLEE_TRASH; assert(ctrlExprCandidates != RBM_NONE); } } else if (call->IsR2ROrVirtualStubRelativeIndir()) { // For R2R and VSD we have stub address in REG_R2R_INDIRECT_PARAM // and will load call address into the temp register from this register. regMaskTP candidates = RBM_NONE; if (call->IsFastTailCall()) { candidates = allRegs(TYP_INT) & RBM_INT_CALLEE_TRASH; assert(candidates != RBM_NONE); } buildInternalIntRegisterDefForNode(call, candidates); } #ifdef TARGET_ARM else { buildInternalIntRegisterDefForNode(call); } if (call->NeedsNullCheck()) { buildInternalIntRegisterDefForNode(call); } #endif // TARGET_ARM RegisterType registerType = call->TypeGet(); // Set destination candidates for return value of the call. #ifdef TARGET_ARM if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) { // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers. dstCandidates = RBM_PINVOKE_TCB; } else #endif // TARGET_ARM if (hasMultiRegRetVal) { assert(retTypeDesc != nullptr); dstCandidates = retTypeDesc->GetABIReturnRegs(); } else if (varTypeUsesFloatArgReg(registerType)) { dstCandidates = RBM_FLOATRET; } else if (registerType == TYP_LONG) { dstCandidates = RBM_LNGRET; } else { dstCandidates = RBM_INTRET; } // First, count reg args // Each register argument corresponds to one source. bool callHasFloatRegArgs = false; for (GenTreeCall::Use& arg : call->LateArgs()) { GenTree* argNode = arg.GetNode(); #ifdef DEBUG // During Build, we only use the ArgTabEntry for validation, // as getting it is rather expensive. 
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode); regNumber argReg = curArgTabEntry->GetRegNum(); assert(curArgTabEntry != nullptr); #endif if (argNode->gtOper == GT_PUTARG_STK) { // late arg that is not passed in a register assert(curArgTabEntry->GetRegNum() == REG_STK); // These should never be contained. assert(!argNode->isContained()); continue; } // A GT_FIELD_LIST has a TYP_VOID, but is used to represent a multireg struct if (argNode->OperGet() == GT_FIELD_LIST) { assert(argNode->isContained()); // There could be up to 2-4 PUTARG_REGs in the list (3 or 4 can only occur for HFAs) for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses()) { #ifdef DEBUG assert(use.GetNode()->OperIs(GT_PUTARG_REG)); assert(use.GetNode()->GetRegNum() == argReg); // Update argReg for the next putarg_reg (if any) argReg = genRegArgNext(argReg); #if defined(TARGET_ARM) // A double register is modelled as an even-numbered single one if (use.GetNode()->TypeGet() == TYP_DOUBLE) { argReg = genRegArgNext(argReg); } #endif // TARGET_ARM #endif BuildUse(use.GetNode(), genRegMask(use.GetNode()->GetRegNum())); srcCount++; } } else if (argNode->OperGet() == GT_PUTARG_SPLIT) { unsigned regCount = argNode->AsPutArgSplit()->gtNumRegs; assert(regCount == curArgTabEntry->numRegs); for (unsigned int i = 0; i < regCount; i++) { BuildUse(argNode, genRegMask(argNode->AsPutArgSplit()->GetRegNumByIdx(i)), i); } srcCount += regCount; } else { assert(argNode->OperIs(GT_PUTARG_REG)); assert(argNode->GetRegNum() == argReg); HandleFloatVarArgs(call, argNode, &callHasFloatRegArgs); #ifdef TARGET_ARM // The `double` types have been transformed to `long` on armel, // while the actual long types have been decomposed. // On ARM we may have bitcasts from DOUBLE to LONG. if (argNode->TypeGet() == TYP_LONG) { assert(argNode->IsMultiRegNode()); BuildUse(argNode, genRegMask(argNode->GetRegNum()), 0); BuildUse(argNode, genRegMask(genRegArgNext(argNode->GetRegNum())), 1); srcCount += 2; } else #endif // TARGET_ARM { BuildUse(argNode, genRegMask(argNode->GetRegNum())); srcCount++; } } } #ifdef DEBUG // Now, count stack args // Note that these need to be computed into a register, but then // they're just stored to the stack - so the reg doesn't // need to remain live until the call. In fact, it must not // because the code generator doesn't actually consider it live, // so it can't be spilled. for (GenTreeCall::Use& use : call->Args()) { GenTree* arg = use.GetNode(); // Skip arguments that have been moved to the Late Arg list if ((arg->gtFlags & GTF_LATE_ARG) == 0) { fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, arg); assert(curArgTabEntry != nullptr); // PUTARG_SPLIT nodes must be in the gtCallLateArgs list, since they // define registers used by the call. assert(arg->OperGet() != GT_PUTARG_SPLIT); if (arg->gtOper == GT_PUTARG_STK) { assert(curArgTabEntry->GetRegNum() == REG_STK); } else { assert(!arg->IsValue() || arg->IsUnusedValue()); } } } #endif // DEBUG // If it is a fast tail call, it is already preferenced to use IP0. // Therefore, no need set src candidates on call tgt again. if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr)) { NYI_ARM("float reg varargs"); // Don't assign the call target to any of the argument registers because // we will use them to also pass floating point arguments as required // by Arm64 ABI. 
ctrlExprCandidates = allRegs(TYP_INT) & ~(RBM_ARG_REGS); } if (ctrlExpr != nullptr) { BuildUse(ctrlExpr, ctrlExprCandidates); srcCount++; } buildInternalRegisterUses(); // Now generate defs and kills. regMaskTP killMask = getKillSetForCall(call); BuildDefsWithKills(call, dstCount, dstCandidates, killMask); return srcCount; } //------------------------------------------------------------------------ // BuildPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node // // Arguments: // argNode - a GT_PUTARG_STK node // // Return Value: // The number of sources consumed by this node. // // Notes: // Set the child node(s) to be contained when we have a multireg arg // int LinearScan::BuildPutArgStk(GenTreePutArgStk* argNode) { assert(argNode->gtOper == GT_PUTARG_STK); GenTree* putArgChild = argNode->gtGetOp1(); int srcCount = 0; // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST), if so it must be a multireg pass-by-value struct if (putArgChild->TypeIs(TYP_STRUCT) || putArgChild->OperIs(GT_FIELD_LIST)) { // We will use store instructions that each write a register sized value if (putArgChild->OperIs(GT_FIELD_LIST)) { assert(putArgChild->isContained()); // We consume all of the items in the GT_FIELD_LIST for (GenTreeFieldList::Use& use : putArgChild->AsFieldList()->Uses()) { BuildUse(use.GetNode()); srcCount++; #if defined(FEATURE_SIMD) if (compMacOsArm64Abi()) { if (use.GetType() == TYP_SIMD12) { // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. // To assemble the vector properly we would need an additional int register. // The other platforms can write it as 16-byte using 1 write. buildInternalIntRegisterDefForNode(use.GetNode()); } } #endif // FEATURE_SIMD } } else { // We can use a ldp/stp sequence so we need two internal registers for ARM64; one for ARM. buildInternalIntRegisterDefForNode(argNode); #ifdef TARGET_ARM64 buildInternalIntRegisterDefForNode(argNode); #endif // TARGET_ARM64 if (putArgChild->OperGet() == GT_OBJ) { assert(putArgChild->isContained()); GenTree* objChild = putArgChild->gtGetOp1(); if (objChild->OperGet() == GT_LCL_VAR_ADDR) { // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR // as one contained operation, and there are no source registers. // assert(objChild->isContained()); } else { // We will generate all of the code for the GT_PUTARG_STK and its child node // as one contained operation // srcCount = BuildOperandUses(objChild); } } else { // No source registers. putArgChild->OperIs(GT_LCL_VAR); } } } else { assert(!putArgChild->isContained()); srcCount = BuildOperandUses(putArgChild); #if defined(FEATURE_SIMD) if (compMacOsArm64Abi() && argNode->GetStackByteSize() == 12) { // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. // To assemble the vector properly we would need an additional int register. // The other platforms can write it as 16-byte using 1 write. buildInternalIntRegisterDefForNode(argNode); } #endif // FEATURE_SIMD } buildInternalRegisterUses(); return srcCount; } //------------------------------------------------------------------------ // BuildPutArgSplit: Set the NodeInfo for a GT_PUTARG_SPLIT node // // Arguments: // argNode - a GT_PUTARG_SPLIT node // // Return Value: // The number of sources consumed by this node. 
// // Notes: // Set the child node(s) to be contained // int LinearScan::BuildPutArgSplit(GenTreePutArgSplit* argNode) { int srcCount = 0; assert(argNode->gtOper == GT_PUTARG_SPLIT); GenTree* putArgChild = argNode->gtGetOp1(); // Registers for split argument corresponds to source int dstCount = argNode->gtNumRegs; regNumber argReg = argNode->GetRegNum(); regMaskTP argMask = RBM_NONE; for (unsigned i = 0; i < argNode->gtNumRegs; i++) { regNumber thisArgReg = (regNumber)((unsigned)argReg + i); argMask |= genRegMask(thisArgReg); argNode->SetRegNumByIdx(thisArgReg, i); } if (putArgChild->OperGet() == GT_FIELD_LIST) { // Generated code: // 1. Consume all of the items in the GT_FIELD_LIST (source) // 2. Store to target slot and move to target registers (destination) from source // unsigned sourceRegCount = 0; // To avoid redundant moves, have the argument operand computed in the // register in which the argument is passed to the call. for (GenTreeFieldList::Use& use : putArgChild->AsFieldList()->Uses()) { GenTree* node = use.GetNode(); assert(!node->isContained()); // The only multi-reg nodes we should see are OperIsMultiRegOp() unsigned currentRegCount; #ifdef TARGET_ARM if (node->OperIsMultiRegOp()) { currentRegCount = node->AsMultiRegOp()->GetRegCount(); } else #endif // TARGET_ARM { assert(!node->IsMultiRegNode()); currentRegCount = 1; } // Consume all the registers, setting the appropriate register mask for the ones that // go into registers. for (unsigned regIndex = 0; regIndex < currentRegCount; regIndex++) { regMaskTP sourceMask = RBM_NONE; if (sourceRegCount < argNode->gtNumRegs) { sourceMask = genRegMask((regNumber)((unsigned)argReg + sourceRegCount)); } sourceRegCount++; BuildUse(node, sourceMask, regIndex); } } srcCount += sourceRegCount; assert(putArgChild->isContained()); } else { assert(putArgChild->TypeGet() == TYP_STRUCT); assert(putArgChild->OperGet() == GT_OBJ); // We can use a ldr/str sequence so we need an internal register buildInternalIntRegisterDefForNode(argNode, allRegs(TYP_INT) & ~argMask); GenTree* objChild = putArgChild->gtGetOp1(); if (objChild->OperGet() == GT_LCL_VAR_ADDR) { // We will generate all of the code for the GT_PUTARG_SPLIT, the GT_OBJ and the GT_LCL_VAR_ADDR // as one contained operation // assert(objChild->isContained()); } else { srcCount = BuildIndirUses(putArgChild->AsIndir()); } assert(putArgChild->isContained()); } buildInternalRegisterUses(); BuildDefs(argNode, dstCount, argMask); return srcCount; } //------------------------------------------------------------------------ // BuildBlockStore: Build the RefPositions for a block store node. // // Arguments: // blkNode - The block store node of interest // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) { GenTree* dstAddr = blkNode->Addr(); GenTree* src = blkNode->Data(); unsigned size = blkNode->Size(); GenTree* srcAddrOrFill = nullptr; regMaskTP dstAddrRegMask = RBM_NONE; regMaskTP srcRegMask = RBM_NONE; regMaskTP sizeRegMask = RBM_NONE; if (blkNode->OperIsInitBlkOp()) { if (src->OperIs(GT_INIT_VAL)) { assert(src->isContained()); src = src->AsUnOp()->gtGetOp1(); } srcAddrOrFill = src; switch (blkNode->gtBlkOpKind) { case GenTreeBlk::BlkOpKindUnroll: #ifdef TARGET_ARM64 { if (dstAddr->isContained()) { // Since the dstAddr is contained the address will be computed in CodeGen. // This might require an integer register to store the value. 
buildInternalIntRegisterDefForNode(blkNode); } const bool isDstRegAddrAlignmentKnown = dstAddr->OperIsLocalAddr(); if (isDstRegAddrAlignmentKnown && (size > FP_REGSIZE_BYTES)) { // For larger block sizes CodeGen can choose to use 16-byte SIMD instructions. buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates()); } } #endif // TARGET_ARM64 break; case GenTreeBlk::BlkOpKindHelper: assert(!src->isContained()); dstAddrRegMask = RBM_ARG_0; srcRegMask = RBM_ARG_1; sizeRegMask = RBM_ARG_2; break; default: unreached(); } } else { if (src->OperIs(GT_IND)) { assert(src->isContained()); srcAddrOrFill = src->AsIndir()->Addr(); } if (blkNode->OperIs(GT_STORE_OBJ)) { // We don't need to materialize the struct size but we still need // a temporary register to perform the sequence of loads and stores. // We can't use the special Write Barrier registers, so exclude them from the mask regMaskTP internalIntCandidates = allRegs(TYP_INT) & ~(RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF); buildInternalIntRegisterDefForNode(blkNode, internalIntCandidates); if (size >= 2 * REGSIZE_BYTES) { // We will use ldp/stp to reduce code size and improve performance // so we need to reserve an extra internal register buildInternalIntRegisterDefForNode(blkNode, internalIntCandidates); } // If we have a dest address we want it in RBM_WRITE_BARRIER_DST_BYREF. dstAddrRegMask = RBM_WRITE_BARRIER_DST_BYREF; // If we have a source address we want it in REG_WRITE_BARRIER_SRC_BYREF. // Otherwise, if it is a local, codegen will put its address in REG_WRITE_BARRIER_SRC_BYREF, // which is killed by a StoreObj (and thus needn't be reserved). if (srcAddrOrFill != nullptr) { assert(!srcAddrOrFill->isContained()); srcRegMask = RBM_WRITE_BARRIER_SRC_BYREF; } } else { switch (blkNode->gtBlkOpKind) { case GenTreeBlk::BlkOpKindUnroll: { buildInternalIntRegisterDefForNode(blkNode); #ifdef TARGET_ARM64 const bool canUseLoadStorePairIntRegsInstrs = (size >= 2 * REGSIZE_BYTES); if (canUseLoadStorePairIntRegsInstrs) { // CodeGen can use ldp/stp instructions sequence. buildInternalIntRegisterDefForNode(blkNode); } const bool isSrcAddrLocal = src->OperIs(GT_LCL_VAR, GT_LCL_FLD) || ((srcAddrOrFill != nullptr) && srcAddrOrFill->OperIsLocalAddr()); const bool isDstAddrLocal = dstAddr->OperIsLocalAddr(); // CodeGen can use 16-byte SIMD ldp/stp for larger block sizes // only when both source and destination base address registers have known alignment. // This is the case, when both registers are either sp or fp. bool canUse16ByteWideInstrs = isSrcAddrLocal && isDstAddrLocal && (size >= 2 * FP_REGSIZE_BYTES); // Note that the SIMD registers allocation is speculative - LSRA doesn't know at this point // whether CodeGen will use SIMD registers (i.e. if such instruction sequence will be more optimal). // Therefore, it must allocate an additional integer register anyway. if (canUse16ByteWideInstrs) { buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates()); buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates()); } const bool srcAddrMayNeedReg = isSrcAddrLocal || ((srcAddrOrFill != nullptr) && srcAddrOrFill->isContained()); const bool dstAddrMayNeedReg = isDstAddrLocal || dstAddr->isContained(); // The following allocates an additional integer register in a case // when a load instruction and a store instruction cannot be encoded using offset // from a corresponding base register. 
if (srcAddrMayNeedReg && dstAddrMayNeedReg) { buildInternalIntRegisterDefForNode(blkNode); } #endif } break; case GenTreeBlk::BlkOpKindHelper: dstAddrRegMask = RBM_ARG_0; if (srcAddrOrFill != nullptr) { assert(!srcAddrOrFill->isContained()); srcRegMask = RBM_ARG_1; } sizeRegMask = RBM_ARG_2; break; default: unreached(); } } } if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (sizeRegMask != RBM_NONE)) { // Reserve a temp register for the block size argument. buildInternalIntRegisterDefForNode(blkNode, sizeRegMask); } int useCount = 0; if (!dstAddr->isContained()) { useCount++; BuildUse(dstAddr, dstAddrRegMask); } else if (dstAddr->OperIsAddrMode()) { useCount += BuildAddrUses(dstAddr->AsAddrMode()->Base()); } if (srcAddrOrFill != nullptr) { if (!srcAddrOrFill->isContained()) { useCount++; BuildUse(srcAddrOrFill, srcRegMask); } else if (srcAddrOrFill->OperIsAddrMode()) { useCount += BuildAddrUses(srcAddrOrFill->AsAddrMode()->Base()); } } if (blkNode->OperIs(GT_STORE_DYN_BLK)) { useCount++; BuildUse(blkNode->AsStoreDynBlk()->gtDynamicSize, sizeRegMask); } buildInternalRegisterUses(); regMaskTP killMask = getKillSetForBlockStore(blkNode); BuildDefsWithKills(blkNode, 0, RBM_NONE, killMask); return useCount; } //------------------------------------------------------------------------ // BuildCast: Set the NodeInfo for a GT_CAST. // // Arguments: // cast - The GT_CAST node // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildCast(GenTreeCast* cast) { GenTree* src = cast->gtGetOp1(); const var_types srcType = genActualType(src->TypeGet()); const var_types castType = cast->gtCastType; #ifdef TARGET_ARM assert(!varTypeIsLong(srcType) || (src->OperIs(GT_LONG) && src->isContained())); // Floating point to integer casts requires a temporary register. if (varTypeIsFloating(srcType) && !varTypeIsFloating(castType)) { buildInternalFloatRegisterDefForNode(cast, RBM_ALLFLOAT); setInternalRegsDelayFree = true; } #else // Overflow checking cast from TYP_LONG to TYP_INT requires a temporary register to // store the min and max immediate values that cannot be encoded in the CMP instruction. if (cast->gtOverflow() && varTypeIsLong(srcType) && !cast->IsUnsigned() && (castType == TYP_INT)) { buildInternalIntRegisterDefForNode(cast); } #endif int srcCount = BuildOperandUses(src); buildInternalRegisterUses(); BuildDef(cast); return srcCount; } #endif // TARGET_ARMARCH
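The BuildPutArgSplit logic in the file above is where the split-struct-argument case this PR calls out is modeled. As a reading aid, here is a minimal sketch of how the consecutive registers of a split argument are folded into a candidate mask; it mirrors the loop in BuildPutArgSplit above and assumes the JIT's own regMaskTP/regNumber types, the genRegMask helper, and RBM_NONE — it is not itself JIT source:

// Sketch only: a split argument occupies numRegs consecutive registers
// starting at the node's first register; each register is OR'ed into the
// mask that constrains where the argument's sources may be allocated.
regMaskTP BuildSplitArgMask(regNumber argReg, unsigned numRegs)
{
    regMaskTP argMask = RBM_NONE;
    for (unsigned i = 0; i < numRegs; i++)
    {
        argMask |= genRegMask((regNumber)((unsigned)argReg + i));
    }
    return argMask;
}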
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Register Requirements for ARM and ARM64 common code XX XX XX XX This encapsulates common logic for setting register requirements for XX XX the ARM and ARM64 architectures. XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures #include "jit.h" #include "sideeffects.h" #include "lower.h" #include "lsra.h" //------------------------------------------------------------------------ // BuildIndir: Specify register requirements for address expression // of an indirection operation. // // Arguments: // indirTree - GT_IND, GT_STOREIND or block gentree node // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildIndir(GenTreeIndir* indirTree) { // struct typed indirs are expected only on rhs of a block copy, // but in this case they must be contained. assert(indirTree->TypeGet() != TYP_STRUCT); GenTree* addr = indirTree->Addr(); GenTree* index = nullptr; int cns = 0; #ifdef TARGET_ARM // Unaligned loads/stores for floating point values must first be loaded into integer register(s) if (indirTree->gtFlags & GTF_IND_UNALIGNED) { var_types type = TYP_UNDEF; if (indirTree->OperGet() == GT_STOREIND) { type = indirTree->AsStoreInd()->Data()->TypeGet(); } else if (indirTree->OperGet() == GT_IND) { type = indirTree->TypeGet(); } if (type == TYP_FLOAT) { buildInternalIntRegisterDefForNode(indirTree); } else if (type == TYP_DOUBLE) { buildInternalIntRegisterDefForNode(indirTree); buildInternalIntRegisterDefForNode(indirTree); } } #endif if (addr->isContained()) { if (addr->OperGet() == GT_LEA) { GenTreeAddrMode* lea = addr->AsAddrMode(); index = lea->Index(); cns = lea->Offset(); // On ARM we may need a single internal register // (when both conditions are true then we still only need a single internal register) if ((index != nullptr) && (cns != 0)) { // ARM does not support both Index and offset so we need an internal register buildInternalIntRegisterDefForNode(indirTree); } else if (!emitter::emitIns_valid_imm_for_ldst_offset(cns, emitTypeSize(indirTree))) { // This offset can't be contained in the ldr/str instruction, so we need an internal register buildInternalIntRegisterDefForNode(indirTree); } } #ifdef TARGET_ARM64 else if (addr->OperGet() == GT_CLS_VAR_ADDR) { // Reserve int to load constant from memory (IF_LARGELDC) buildInternalIntRegisterDefForNode(indirTree); } #endif // TARGET_ARM64 } #ifdef FEATURE_SIMD if (indirTree->TypeGet() == TYP_SIMD12) { // If indirTree is of TYP_SIMD12, addr is not contained. See comment in LowerIndir(). assert(!addr->isContained()); // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. 
// To assemble the vector properly we would need an additional int register buildInternalIntRegisterDefForNode(indirTree); } #endif // FEATURE_SIMD int srcCount = BuildIndirUses(indirTree); buildInternalRegisterUses(); if (!indirTree->OperIs(GT_STOREIND, GT_NULLCHECK)) { BuildDef(indirTree); } return srcCount; } //------------------------------------------------------------------------ // BuildCall: Set the NodeInfo for a call. // // Arguments: // call - The call node of interest // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildCall(GenTreeCall* call) { bool hasMultiRegRetVal = false; const ReturnTypeDesc* retTypeDesc = nullptr; regMaskTP dstCandidates = RBM_NONE; int srcCount = 0; int dstCount = 0; if (call->TypeGet() != TYP_VOID) { hasMultiRegRetVal = call->HasMultiRegRetVal(); if (hasMultiRegRetVal) { // dst count = number of registers in which the value is returned by call retTypeDesc = call->GetReturnTypeDesc(); dstCount = retTypeDesc->GetReturnRegCount(); } else { dstCount = 1; } } GenTree* ctrlExpr = call->gtControlExpr; regMaskTP ctrlExprCandidates = RBM_NONE; if (call->gtCallType == CT_INDIRECT) { // either gtControlExpr != null or gtCallAddr != null. // Both cannot be non-null at the same time. assert(ctrlExpr == nullptr); assert(call->gtCallAddr != nullptr); ctrlExpr = call->gtCallAddr; } // set reg requirements on call target represented as control sequence. if (ctrlExpr != nullptr) { // we should never see a gtControlExpr whose type is void. assert(ctrlExpr->TypeGet() != TYP_VOID); // In case of fast tail implemented as jmp, make sure that gtControlExpr is // computed into a register. if (call->IsFastTailCall()) { // Fast tail call - make sure that call target is always computed in volatile registers // that will not be overridden by epilog sequence. ctrlExprCandidates = allRegs(TYP_INT) & RBM_INT_CALLEE_TRASH & ~RBM_LR; if (compiler->getNeedsGSSecurityCookie()) { ctrlExprCandidates &= ~(genRegMask(REG_GSCOOKIE_TMP_0) | genRegMask(REG_GSCOOKIE_TMP_1)); } assert(ctrlExprCandidates != RBM_NONE); } } else if (call->IsR2ROrVirtualStubRelativeIndir()) { // For R2R and VSD we have stub address in REG_R2R_INDIRECT_PARAM // and will load call address into the temp register from this register. regMaskTP candidates = RBM_NONE; if (call->IsFastTailCall()) { candidates = allRegs(TYP_INT) & RBM_INT_CALLEE_TRASH; assert(candidates != RBM_NONE); } buildInternalIntRegisterDefForNode(call, candidates); } #ifdef TARGET_ARM else { buildInternalIntRegisterDefForNode(call); } if (call->NeedsNullCheck()) { buildInternalIntRegisterDefForNode(call); } #endif // TARGET_ARM RegisterType registerType = call->TypeGet(); // Set destination candidates for return value of the call. #ifdef TARGET_ARM if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME)) { // The ARM CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with // TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers. dstCandidates = RBM_PINVOKE_TCB; } else #endif // TARGET_ARM if (hasMultiRegRetVal) { assert(retTypeDesc != nullptr); dstCandidates = retTypeDesc->GetABIReturnRegs(); } else if (varTypeUsesFloatArgReg(registerType)) { dstCandidates = RBM_FLOATRET; } else if (registerType == TYP_LONG) { dstCandidates = RBM_LNGRET; } else { dstCandidates = RBM_INTRET; } // First, count reg args // Each register argument corresponds to one source. 
bool callHasFloatRegArgs = false; for (GenTreeCall::Use& arg : call->LateArgs()) { GenTree* argNode = arg.GetNode(); #ifdef DEBUG // During Build, we only use the ArgTabEntry for validation, // as getting it is rather expensive. fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode); regNumber argReg = curArgTabEntry->GetRegNum(); assert(curArgTabEntry != nullptr); #endif if (argNode->gtOper == GT_PUTARG_STK) { // late arg that is not passed in a register assert(curArgTabEntry->GetRegNum() == REG_STK); // These should never be contained. assert(!argNode->isContained()); continue; } // A GT_FIELD_LIST has a TYP_VOID, but is used to represent a multireg struct if (argNode->OperGet() == GT_FIELD_LIST) { assert(argNode->isContained()); // There could be up to 2-4 PUTARG_REGs in the list (3 or 4 can only occur for HFAs) for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses()) { #ifdef DEBUG assert(use.GetNode()->OperIs(GT_PUTARG_REG)); assert(use.GetNode()->GetRegNum() == argReg); // Update argReg for the next putarg_reg (if any) argReg = genRegArgNext(argReg); #if defined(TARGET_ARM) // A double register is modelled as an even-numbered single one if (use.GetNode()->TypeGet() == TYP_DOUBLE) { argReg = genRegArgNext(argReg); } #endif // TARGET_ARM #endif BuildUse(use.GetNode(), genRegMask(use.GetNode()->GetRegNum())); srcCount++; } } else if (argNode->OperGet() == GT_PUTARG_SPLIT) { unsigned regCount = argNode->AsPutArgSplit()->gtNumRegs; assert(regCount == curArgTabEntry->numRegs); for (unsigned int i = 0; i < regCount; i++) { BuildUse(argNode, genRegMask(argNode->AsPutArgSplit()->GetRegNumByIdx(i)), i); } srcCount += regCount; } else { assert(argNode->OperIs(GT_PUTARG_REG)); assert(argNode->GetRegNum() == argReg); HandleFloatVarArgs(call, argNode, &callHasFloatRegArgs); #ifdef TARGET_ARM // The `double` types have been transformed to `long` on armel, // while the actual long types have been decomposed. // On ARM we may have bitcasts from DOUBLE to LONG. if (argNode->TypeGet() == TYP_LONG) { assert(argNode->IsMultiRegNode()); BuildUse(argNode, genRegMask(argNode->GetRegNum()), 0); BuildUse(argNode, genRegMask(genRegArgNext(argNode->GetRegNum())), 1); srcCount += 2; } else #endif // TARGET_ARM { BuildUse(argNode, genRegMask(argNode->GetRegNum())); srcCount++; } } } #ifdef DEBUG // Now, count stack args // Note that these need to be computed into a register, but then // they're just stored to the stack - so the reg doesn't // need to remain live until the call. In fact, it must not // because the code generator doesn't actually consider it live, // so it can't be spilled. for (GenTreeCall::Use& use : call->Args()) { GenTree* arg = use.GetNode(); // Skip arguments that have been moved to the Late Arg list if ((arg->gtFlags & GTF_LATE_ARG) == 0) { fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, arg); assert(curArgTabEntry != nullptr); // PUTARG_SPLIT nodes must be in the gtCallLateArgs list, since they // define registers used by the call. assert(arg->OperGet() != GT_PUTARG_SPLIT); if (arg->gtOper == GT_PUTARG_STK) { assert(curArgTabEntry->GetRegNum() == REG_STK); } else { assert(!arg->IsValue() || arg->IsUnusedValue()); } } } #endif // DEBUG // If it is a fast tail call, it is already preferenced to use IP0. // Therefore, no need set src candidates on call tgt again. 
if (call->IsVarargs() && callHasFloatRegArgs && !call->IsFastTailCall() && (ctrlExpr != nullptr)) { NYI_ARM("float reg varargs"); // Don't assign the call target to any of the argument registers because // we will use them to also pass floating point arguments as required // by Arm64 ABI. ctrlExprCandidates = allRegs(TYP_INT) & ~(RBM_ARG_REGS); } if (ctrlExpr != nullptr) { BuildUse(ctrlExpr, ctrlExprCandidates); srcCount++; } buildInternalRegisterUses(); // Now generate defs and kills. regMaskTP killMask = getKillSetForCall(call); BuildDefsWithKills(call, dstCount, dstCandidates, killMask); return srcCount; } //------------------------------------------------------------------------ // BuildPutArgStk: Set the NodeInfo for a GT_PUTARG_STK node // // Arguments: // argNode - a GT_PUTARG_STK node // // Return Value: // The number of sources consumed by this node. // // Notes: // Set the child node(s) to be contained when we have a multireg arg // int LinearScan::BuildPutArgStk(GenTreePutArgStk* argNode) { assert(argNode->gtOper == GT_PUTARG_STK); GenTree* putArgChild = argNode->gtGetOp1(); int srcCount = 0; // Do we have a TYP_STRUCT argument (or a GT_FIELD_LIST), if so it must be a multireg pass-by-value struct if (putArgChild->TypeIs(TYP_STRUCT) || putArgChild->OperIs(GT_FIELD_LIST)) { // We will use store instructions that each write a register sized value if (putArgChild->OperIs(GT_FIELD_LIST)) { assert(putArgChild->isContained()); // We consume all of the items in the GT_FIELD_LIST for (GenTreeFieldList::Use& use : putArgChild->AsFieldList()->Uses()) { BuildUse(use.GetNode()); srcCount++; #if defined(FEATURE_SIMD) if (compMacOsArm64Abi()) { if (use.GetType() == TYP_SIMD12) { // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. // To assemble the vector properly we would need an additional int register. // The other platforms can write it as 16-byte using 1 write. buildInternalIntRegisterDefForNode(use.GetNode()); } } #endif // FEATURE_SIMD } } else { // We can use a ldp/stp sequence so we need two internal registers for ARM64; one for ARM. buildInternalIntRegisterDefForNode(argNode); #ifdef TARGET_ARM64 buildInternalIntRegisterDefForNode(argNode); #endif // TARGET_ARM64 if (putArgChild->OperGet() == GT_OBJ) { assert(putArgChild->isContained()); GenTree* objChild = putArgChild->gtGetOp1(); if (objChild->OperGet() == GT_LCL_VAR_ADDR) { // We will generate all of the code for the GT_PUTARG_STK, the GT_OBJ and the GT_LCL_VAR_ADDR // as one contained operation, and there are no source registers. // assert(objChild->isContained()); } else { // We will generate all of the code for the GT_PUTARG_STK and its child node // as one contained operation // srcCount = BuildOperandUses(objChild); } } else { // No source registers. putArgChild->OperIs(GT_LCL_VAR); } } } else { assert(!putArgChild->isContained()); srcCount = BuildOperandUses(putArgChild); #if defined(FEATURE_SIMD) if (compMacOsArm64Abi() && argNode->GetStackByteSize() == 12) { // Vector3 is read/written as two reads/writes: 8 byte and 4 byte. // To assemble the vector properly we would need an additional int register. // The other platforms can write it as 16-byte using 1 write. 
buildInternalIntRegisterDefForNode(argNode); } #endif // FEATURE_SIMD } buildInternalRegisterUses(); return srcCount; } //------------------------------------------------------------------------ // BuildPutArgSplit: Set the NodeInfo for a GT_PUTARG_SPLIT node // // Arguments: // argNode - a GT_PUTARG_SPLIT node // // Return Value: // The number of sources consumed by this node. // // Notes: // Set the child node(s) to be contained // int LinearScan::BuildPutArgSplit(GenTreePutArgSplit* argNode) { int srcCount = 0; assert(argNode->gtOper == GT_PUTARG_SPLIT); GenTree* putArgChild = argNode->gtGetOp1(); // Registers for split argument corresponds to source int dstCount = argNode->gtNumRegs; regNumber argReg = argNode->GetRegNum(); regMaskTP argMask = RBM_NONE; for (unsigned i = 0; i < argNode->gtNumRegs; i++) { regNumber thisArgReg = (regNumber)((unsigned)argReg + i); argMask |= genRegMask(thisArgReg); argNode->SetRegNumByIdx(thisArgReg, i); } if (putArgChild->OperGet() == GT_FIELD_LIST) { // Generated code: // 1. Consume all of the items in the GT_FIELD_LIST (source) // 2. Store to target slot and move to target registers (destination) from source // unsigned sourceRegCount = 0; // To avoid redundant moves, have the argument operand computed in the // register in which the argument is passed to the call. for (GenTreeFieldList::Use& use : putArgChild->AsFieldList()->Uses()) { GenTree* node = use.GetNode(); assert(!node->isContained()); // The only multi-reg nodes we should see are OperIsMultiRegOp() unsigned currentRegCount; #ifdef TARGET_ARM if (node->OperIsMultiRegOp()) { currentRegCount = node->AsMultiRegOp()->GetRegCount(); } else #endif // TARGET_ARM { assert(!node->IsMultiRegNode()); currentRegCount = 1; } // Consume all the registers, setting the appropriate register mask for the ones that // go into registers. for (unsigned regIndex = 0; regIndex < currentRegCount; regIndex++) { regMaskTP sourceMask = RBM_NONE; if (sourceRegCount < argNode->gtNumRegs) { sourceMask = genRegMask((regNumber)((unsigned)argReg + sourceRegCount)); } sourceRegCount++; BuildUse(node, sourceMask, regIndex); } } srcCount += sourceRegCount; assert(putArgChild->isContained()); } else { assert(putArgChild->TypeGet() == TYP_STRUCT); assert(putArgChild->OperGet() == GT_OBJ); // We can use a ldr/str sequence so we need an internal register buildInternalIntRegisterDefForNode(argNode, allRegs(TYP_INT) & ~argMask); GenTree* objChild = putArgChild->gtGetOp1(); if (objChild->OperGet() == GT_LCL_VAR_ADDR) { // We will generate all of the code for the GT_PUTARG_SPLIT, the GT_OBJ and the GT_LCL_VAR_ADDR // as one contained operation // assert(objChild->isContained()); } else { srcCount = BuildIndirUses(putArgChild->AsIndir()); } assert(putArgChild->isContained()); } buildInternalRegisterUses(); BuildDefs(argNode, dstCount, argMask); return srcCount; } //------------------------------------------------------------------------ // BuildBlockStore: Build the RefPositions for a block store node. // // Arguments: // blkNode - The block store node of interest // // Return Value: // The number of sources consumed by this node. 
// int LinearScan::BuildBlockStore(GenTreeBlk* blkNode) { GenTree* dstAddr = blkNode->Addr(); GenTree* src = blkNode->Data(); unsigned size = blkNode->Size(); GenTree* srcAddrOrFill = nullptr; regMaskTP dstAddrRegMask = RBM_NONE; regMaskTP srcRegMask = RBM_NONE; regMaskTP sizeRegMask = RBM_NONE; if (blkNode->OperIsInitBlkOp()) { if (src->OperIs(GT_INIT_VAL)) { assert(src->isContained()); src = src->AsUnOp()->gtGetOp1(); } srcAddrOrFill = src; switch (blkNode->gtBlkOpKind) { case GenTreeBlk::BlkOpKindUnroll: #ifdef TARGET_ARM64 { if (dstAddr->isContained()) { // Since the dstAddr is contained the address will be computed in CodeGen. // This might require an integer register to store the value. buildInternalIntRegisterDefForNode(blkNode); } const bool isDstRegAddrAlignmentKnown = dstAddr->OperIsLocalAddr(); if (isDstRegAddrAlignmentKnown && (size > FP_REGSIZE_BYTES)) { // For larger block sizes CodeGen can choose to use 16-byte SIMD instructions. buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates()); } } #endif // TARGET_ARM64 break; case GenTreeBlk::BlkOpKindHelper: assert(!src->isContained()); dstAddrRegMask = RBM_ARG_0; srcRegMask = RBM_ARG_1; sizeRegMask = RBM_ARG_2; break; default: unreached(); } } else { if (src->OperIs(GT_IND)) { assert(src->isContained()); srcAddrOrFill = src->AsIndir()->Addr(); } if (blkNode->OperIs(GT_STORE_OBJ)) { // We don't need to materialize the struct size but we still need // a temporary register to perform the sequence of loads and stores. // We can't use the special Write Barrier registers, so exclude them from the mask regMaskTP internalIntCandidates = allRegs(TYP_INT) & ~(RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF); buildInternalIntRegisterDefForNode(blkNode, internalIntCandidates); if (size >= 2 * REGSIZE_BYTES) { // We will use ldp/stp to reduce code size and improve performance // so we need to reserve an extra internal register buildInternalIntRegisterDefForNode(blkNode, internalIntCandidates); } // If we have a dest address we want it in RBM_WRITE_BARRIER_DST_BYREF. dstAddrRegMask = RBM_WRITE_BARRIER_DST_BYREF; // If we have a source address we want it in REG_WRITE_BARRIER_SRC_BYREF. // Otherwise, if it is a local, codegen will put its address in REG_WRITE_BARRIER_SRC_BYREF, // which is killed by a StoreObj (and thus needn't be reserved). if (srcAddrOrFill != nullptr) { assert(!srcAddrOrFill->isContained()); srcRegMask = RBM_WRITE_BARRIER_SRC_BYREF; } } else { switch (blkNode->gtBlkOpKind) { case GenTreeBlk::BlkOpKindUnroll: { buildInternalIntRegisterDefForNode(blkNode); #ifdef TARGET_ARM64 const bool canUseLoadStorePairIntRegsInstrs = (size >= 2 * REGSIZE_BYTES); if (canUseLoadStorePairIntRegsInstrs) { // CodeGen can use ldp/stp instructions sequence. buildInternalIntRegisterDefForNode(blkNode); } const bool isSrcAddrLocal = src->OperIs(GT_LCL_VAR, GT_LCL_FLD) || ((srcAddrOrFill != nullptr) && srcAddrOrFill->OperIsLocalAddr()); const bool isDstAddrLocal = dstAddr->OperIsLocalAddr(); // CodeGen can use 16-byte SIMD ldp/stp for larger block sizes // only when both source and destination base address registers have known alignment. // This is the case, when both registers are either sp or fp. bool canUse16ByteWideInstrs = isSrcAddrLocal && isDstAddrLocal && (size >= 2 * FP_REGSIZE_BYTES); // Note that the SIMD registers allocation is speculative - LSRA doesn't know at this point // whether CodeGen will use SIMD registers (i.e. if such instruction sequence will be more optimal). 
// Therefore, it must allocate an additional integer register anyway. if (canUse16ByteWideInstrs) { buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates()); buildInternalFloatRegisterDefForNode(blkNode, internalFloatRegCandidates()); } const bool srcAddrMayNeedReg = isSrcAddrLocal || ((srcAddrOrFill != nullptr) && srcAddrOrFill->isContained()); const bool dstAddrMayNeedReg = isDstAddrLocal || dstAddr->isContained(); // The following allocates an additional integer register in a case // when a load instruction and a store instruction cannot be encoded using offset // from a corresponding base register. if (srcAddrMayNeedReg && dstAddrMayNeedReg) { buildInternalIntRegisterDefForNode(blkNode); } #endif } break; case GenTreeBlk::BlkOpKindHelper: dstAddrRegMask = RBM_ARG_0; if (srcAddrOrFill != nullptr) { assert(!srcAddrOrFill->isContained()); srcRegMask = RBM_ARG_1; } sizeRegMask = RBM_ARG_2; break; default: unreached(); } } } if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (sizeRegMask != RBM_NONE)) { // Reserve a temp register for the block size argument. buildInternalIntRegisterDefForNode(blkNode, sizeRegMask); } int useCount = 0; if (!dstAddr->isContained()) { useCount++; BuildUse(dstAddr, dstAddrRegMask); } else if (dstAddr->OperIsAddrMode()) { useCount += BuildAddrUses(dstAddr->AsAddrMode()->Base()); } if (srcAddrOrFill != nullptr) { if (!srcAddrOrFill->isContained()) { useCount++; BuildUse(srcAddrOrFill, srcRegMask); } else if (srcAddrOrFill->OperIsAddrMode()) { useCount += BuildAddrUses(srcAddrOrFill->AsAddrMode()->Base()); } } if (blkNode->OperIs(GT_STORE_DYN_BLK)) { useCount++; BuildUse(blkNode->AsStoreDynBlk()->gtDynamicSize, sizeRegMask); } buildInternalRegisterUses(); regMaskTP killMask = getKillSetForBlockStore(blkNode); BuildDefsWithKills(blkNode, 0, RBM_NONE, killMask); return useCount; } //------------------------------------------------------------------------ // BuildCast: Set the NodeInfo for a GT_CAST. // // Arguments: // cast - The GT_CAST node // // Return Value: // The number of sources consumed by this node. // int LinearScan::BuildCast(GenTreeCast* cast) { GenTree* src = cast->gtGetOp1(); const var_types srcType = genActualType(src->TypeGet()); const var_types castType = cast->gtCastType; #ifdef TARGET_ARM assert(!varTypeIsLong(srcType) || (src->OperIs(GT_LONG) && src->isContained())); // Floating point to integer casts requires a temporary register. if (varTypeIsFloating(srcType) && !varTypeIsFloating(castType)) { buildInternalFloatRegisterDefForNode(cast, RBM_ALLFLOAT); setInternalRegsDelayFree = true; } #else // Overflow checking cast from TYP_LONG to TYP_INT requires a temporary register to // store the min and max immediate values that cannot be encoded in the CMP instruction. if (cast->gtOverflow() && varTypeIsLong(srcType) && !cast->IsUnsigned() && (castType == TYP_INT)) { buildInternalIntRegisterDefForNode(cast); } #endif int srcCount = BuildOperandUses(src); buildInternalRegisterUses(); BuildDef(cast); return srcCount; } #endif // TARGET_ARMARCH
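The substantive delta between the before and after versions of BuildCall above is the fast-tail-call target register mask. A minimal sketch of the new computation, lifted directly from the after-content (REG_GSCOOKIE_TMP_0/1, RBM_LR, and the other names come from the JIT's target headers; this is a reading aid, not a definitive implementation):

// Sketch of the new candidate-mask computation in BuildCall: the call target
// for a fast tail call must be a volatile integer register, must not be LR
// (restored by the epilog), and must not collide with the GS-cookie temp
// registers when the epilog performs a security-cookie check.
regMaskTP FastTailCallTargetCandidates(Compiler* compiler)
{
    regMaskTP candidates = allRegs(TYP_INT) & RBM_INT_CALLEE_TRASH & ~RBM_LR;
    if (compiler->getNeedsGSSecurityCookie())
    {
        candidates &= ~(genRegMask(REG_GSCOOKIE_TMP_0) | genRegMask(REG_GSCOOKIE_TMP_1));
    }
    assert(candidates != RBM_NONE);
    return candidates;
}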
1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that is passed to the callee.
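The three restrictions above can be summarized as a single legality predicate. The sketch below is hypothetical: CallSummary and its fields are invented for illustration and are not JIT types; the real checks live in the JIT's fast-tail-call legality code (fgCanFastTailCall in morph.cpp):

// Hypothetical summary of the ARM32 fast-tail-call restrictions listed above.
struct CallSummary
{
    bool hasSplitStructArg;       // struct passed partly in registers, partly on stack
    bool usesNonStandardCallConv; // callee uses a non-standard calling convention
    bool overwritesCallerStack;   // would clobber stack slots still needed as callee args
};

bool CanUseFastTailCallArm32(const CallSummary& call)
{
    return !call.hasSplitStructArg && !call.usesNonStandardCallConv && !call.overwritesCallerStack;
}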
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that is passed to the callee.
./src/coreclr/jit/morph.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Morph XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Convert the given node into a call to the specified helper passing // the given argument list. // // Tries to fold constants and also adds an edge for overflow exception // returns the morphed tree GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper) { GenTree* result; /* If the operand is a constant, we'll try to fold it */ if (oper->OperIsConst()) { GenTree* oldTree = tree; tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...) if (tree != oldTree) { return fgMorphTree(tree); } else if (tree->OperIsConst()) { return fgMorphConst(tree); } // assert that oper is unchanged and that it is still a GT_CAST node noway_assert(tree->AsCast()->CastOp() == oper); noway_assert(tree->gtOper == GT_CAST); } result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper)); assert(result == tree); return result; } /***************************************************************************** * * Convert the given node into a call to the specified helper passing * the given argument list. */ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs) { // The helper call ought to be semantically equivalent to the original node, so preserve its VN. tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN); GenTreeCall* call = tree->AsCall(); call->gtCallType = CT_HELPER; call->gtReturnType = tree->TypeGet(); call->gtCallMethHnd = eeFindHelper(helper); call->gtCallThisArg = nullptr; call->gtCallArgs = args; call->gtCallLateArgs = nullptr; call->fgArgInfo = nullptr; call->gtRetClsHnd = nullptr; call->gtCallMoreFlags = GTF_CALL_M_EMPTY; call->gtInlineCandidateInfo = nullptr; call->gtControlExpr = nullptr; #ifdef UNIX_X86_ABI call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI #if DEBUG // Helper calls are never candidates. call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; call->callSig = nullptr; #endif // DEBUG #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif #if FEATURE_MULTIREG_RET call->ResetReturnType(); call->ClearOtherRegs(); call->ClearOtherRegFlags(); #ifndef TARGET_64BIT if (varTypeIsLong(tree)) { call->InitializeLongReturnType(); } #endif // !TARGET_64BIT #endif // FEATURE_MULTIREG_RET if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree->gtFlags |= GTF_CALL; for (GenTreeCall::Use& use : GenTreeCall::UseList(args)) { tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } /* Perform the morphing */ if (morphArgs) { tree = fgMorphArgs(call); } return tree; } //------------------------------------------------------------------------ // fgMorphExpandCast: Performs the pre-order (required) morphing for a cast. // // Performs a rich variety of pre-order transformations (and some optimizations). // // Notably: // 1. 
Splits long -> small type casts into long -> int -> small type // for 32 bit targets. Does the same for float/double -> small type // casts for all targets. // 2. Morphs casts not supported by the target directly into helpers. // These mostly have to do with casts from and to floating point // types, especially checked ones. Refer to the implementation for // what specific casts need to be handled - it is a complex matrix. // 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via // assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC // temporary. // 3. "Pushes down" truncating long -> int casts for some operations: // CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)). // The purpose of this is to allow "optNarrowTree" in the post-order // traversal to fold the tree into a TYP_INT one, which helps 32 bit // targets (and AMD64 too since 32 bit instructions are more compact). // TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64. // // Arguments: // tree - the cast tree to morph // // Return Value: // The fully morphed tree, or "nullptr" if it needs further morphing, // in which case the cast may be transformed into an unchecked one // and its operand changed (the cast "expanded" into two). // GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) { GenTree* oper = tree->CastOp(); if (fgGlobalMorph && (oper->gtOper == GT_ADDR)) { // Make sure we've checked if 'oper' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast // morphing code to see that type. fgMorphImplicitByRefArgs(oper); } var_types srcType = genActualType(oper); var_types dstType = tree->CastToType(); unsigned dstSize = genTypeSize(dstType); // See if the cast has to be done in two steps. R -> I if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { if (srcType == TYP_FLOAT #if defined(TARGET_ARM64) // Arm64: src = float, dst is overflow conversion. // This goes through helper and hence src needs to be converted to double. && tree->gtOverflow() #elif defined(TARGET_AMD64) // Amd64: src = float, dst = uint64 or overflow conversion. // This goes through helper and hence src needs to be converted to double. && (tree->gtOverflow() || (dstType == TYP_ULONG)) #elif defined(TARGET_ARM) // Arm: src = float, dst = int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType)) #else // x86: src = float, dst = uint32/int64/uint64 or overflow conversion. && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT)) #endif ) { oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); } // Do we need to do it in two steps R -> I -> smallType? if (dstSize < genTypeSize(TYP_INT)) { oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->AsCast()->CastOp() = oper; // We must not mistreat the original cast, which was from a floating point type, // as from an unsigned type, since we now have a TYP_INT node for the source and // CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT). assert(!tree->IsUnsigned()); } else { if (!tree->gtOverflow()) { #ifdef TARGET_ARM64 // ARM64 supports all non-overflow checking conversions directly. 
return nullptr; #else switch (dstType) { case TYP_INT: return nullptr; case TYP_UINT: #if defined(TARGET_ARM) || defined(TARGET_AMD64) return nullptr; #else // TARGET_X86 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); #endif // TARGET_X86 case TYP_LONG: #ifdef TARGET_AMD64 // SSE2 has instructions to convert a float/double directly to a long return nullptr; #else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); #endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); default: unreached(); } #endif // TARGET_ARM64 } else { switch (dstType) { case TYP_INT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper); case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper); case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper); case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper); default: unreached(); } } } } #ifndef TARGET_64BIT // The code generation phase (for x86 & ARM32) does not handle casts // directly from [u]long to anything other than [u]int. Insert an // intermediate cast to native int. else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType)) { oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->AsCast()->CastOp() = oper; } #endif //! TARGET_64BIT #ifdef TARGET_ARMARCH // AArch, unlike x86/amd64, has instructions that can cast directly from // all integers (except for longs on AArch32 of course) to floats. // Because there is no IL instruction conv.r4.un, uint/ulong -> float // casts are always imported as CAST(float <- CAST(double <- uint/ulong)). // We can eliminate the redundant intermediate cast as an optimization. else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; return fgMorphTree(oper); } #endif // TARGET_ARMARCH #ifdef TARGET_ARM // converts long/ulong --> float/double casts into helper calls. else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType)) { if (dstType == TYP_FLOAT) { // there is only a double helper, so we // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } if (tree->gtFlags & GTF_UNSIGNED) return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } #endif // TARGET_ARM #ifdef TARGET_AMD64 // Do we have to do two step U4/8 -> R4/8 ? // Codegen supports the following conversion as one-step operation // a) Long -> R4/R8 // b) U8 -> R8 // // The following conversions are performed as two-step operations using above. // U4 -> R4/8 = U4-> Long -> R4/8 // U8 -> R4 = U8 -> R8 -> R4 else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { if (dstType == TYP_FLOAT) { // Codegen can handle U8 -> R8 conversion. 
// U8 -> R4 = U8 -> R8 -> R4 // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->CastOp() = oper; } } #endif // TARGET_AMD64 #ifdef TARGET_X86 // Do we have to do two step U4/8 -> R4/8 ? else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } } else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType)) { oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); // Since we don't have a Jit Helper that converts to a TYP_FLOAT // we just use the one that converts to a TYP_DOUBLE // and then add a cast to TYP_FLOAT // if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL)) { // Fix the return type to be TYP_DOUBLE // oper->gtType = TYP_DOUBLE; // Add a Cast to TYP_FLOAT // tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } else { return oper; } } #endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. we would like to just // change the type to int, however this gives the emitter fits because // it believes the variable is a GC variable at the beginning of the // instruction group, but is not turned non-gc by the code generator // we fix this by copying the GC pointer to a non-gc pointer temp. noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?"); // We generate an assignment to an int and then do the cast from an int. With this we avoid // the gc problem and we allow casts to bytes, longs, etc... unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; GenTree* asg = gtNewTempAssign(lclNum, oper); oper->gtType = srcType; // do the real cast GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType); // Generate the comma tree oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast); return fgMorphTree(oper); } // Look for narrowing casts ([u]long -> [u]int) and try to push them // down into the operand before morphing it. // // It doesn't matter if this is cast is from ulong or long (i.e. if // GTF_UNSIGNED is set) because the transformation is only applied to // overflow-insensitive narrowing casts, which always silently truncate. // // Note that casts from [u]long to small integer types are handled above. if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT))) { // As a special case, look for overflow-sensitive casts of an AND // expression, and see if the second operand is a small constant. Since // the result of an AND is bound by its smaller operand, it may be // possible to prove that the cast won't overflow, which will in turn // allow the cast's operand to be transformed. 
if (tree->gtOverflow() && (oper->OperGet() == GT_AND)) { GenTree* andOp2 = oper->AsOp()->gtOp2; // Look for a constant less than 2^{32} for a cast to uint, or less // than 2^{31} for a cast to int. int maxWidth = (dstType == TYP_UINT) ? 32 : 31; if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0)) { tree->ClearOverflow(); tree->SetAllEffectsFlags(oper); } } // Only apply this transformation during global morph, // when neither the cast node nor the oper node may throw an exception // based on the upper 32 bits. // if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx()) { // For these operations the lower 32 bits of the result only depends // upon the lower 32 bits of the operands. // bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG); // For long LSH cast to int, there is a discontinuity in behavior // when the shift amount is 32 or larger. // // CAST(INT, LSH(1LL, 31)) == LSH(1, 31) // LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31) // // CAST(INT, LSH(1LL, 32)) == 0 // LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1 // // So some extra validation is needed. // if (oper->OperIs(GT_LSH)) { GenTree* shiftAmount = oper->AsOp()->gtOp2; // Expose constant value for shift, if possible, to maximize the number // of cases we can handle. shiftAmount = gtFoldExpr(shiftAmount); oper->AsOp()->gtOp2 = shiftAmount; #if DEBUG // We may remorph the shift amount tree again later, so clear any morphed flag. shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG if (shiftAmount->IsIntegralConst()) { const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue(); if ((shiftAmountValue >= 64) || (shiftAmountValue < 0)) { // Shift amount is large enough or negative so result is undefined. // Don't try to optimize. assert(!canPushCast); } else if (shiftAmountValue >= 32) { // We know that we have a narrowing cast ([u]long -> [u]int) // and that we are casting to a 32-bit value, which will result in zero. // // Check to see if we have any side-effects that we must keep // if ((tree->gtFlags & GTF_ALL_EFFECT) == 0) { // Result of the shift is zero. DEBUG_DESTROY_NODE(tree); GenTree* zero = gtNewZeroConNode(TYP_INT); return fgMorphTree(zero); } else // We do have a side-effect { // We could create a GT_COMMA node here to keep the side-effect and return a zero // Instead we just don't try to optimize this case. canPushCast = false; } } else { // Shift amount is positive and small enough that we can push the cast through. canPushCast = true; } } else { // Shift amount is unknown. We can't optimize this case. assert(!canPushCast); } } if (canPushCast) { DEBUG_DESTROY_NODE(tree); // Insert narrowing casts for op1 and op2. oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType); if (oper->AsOp()->gtOp2 != nullptr) { oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType); } // Clear the GT_MUL_64RSLT if it is set. if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT)) { oper->gtFlags &= ~GTF_MUL_64RSLT; } // The operation now produces a 32-bit result. oper->gtType = TYP_INT; // Remorph the new tree as the casts that we added may be folded away. 
return fgMorphTree(oper); } } } return nullptr; } #ifdef DEBUG const char* getNonStandardArgKindName(NonStandardArgKind kind) { switch (kind) { case NonStandardArgKind::None: return "None"; case NonStandardArgKind::PInvokeFrame: return "PInvokeFrame"; case NonStandardArgKind::PInvokeTarget: return "PInvokeTarget"; case NonStandardArgKind::PInvokeCookie: return "PInvokeCookie"; case NonStandardArgKind::WrapperDelegateCell: return "WrapperDelegateCell"; case NonStandardArgKind::ShiftLow: return "ShiftLow"; case NonStandardArgKind::ShiftHigh: return "ShiftHigh"; case NonStandardArgKind::FixedRetBuffer: return "FixedRetBuffer"; case NonStandardArgKind::VirtualStubCell: return "VirtualStubCell"; case NonStandardArgKind::R2RIndirectionCell: return "R2RIndirectionCell"; case NonStandardArgKind::ValidateIndirectCallTarget: return "ValidateIndirectCallTarget"; default: unreached(); } } void fgArgTabEntry::Dump() const { printf("fgArgTabEntry[arg %u", argNum); printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet())); printf(" %s", varTypeName(argType)); printf(" (%s)", passedByRef ? "By ref" : "By value"); if (GetRegNum() != REG_STK) { printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s"); for (unsigned i = 0; i < numRegs; i++) { printf(" %s", getRegName(regNums[i])); } } if (GetStackByteSize() > 0) { #if defined(DEBUG_ARG_SLOTS) printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset); #else printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset); #endif } printf(", byteAlignment=%u", m_byteAlignment); if (isLateArg()) { printf(", lateArgInx=%u", GetLateArgInx()); } if (IsSplit()) { printf(", isSplit"); } if (needTmp) { printf(", tmpNum=V%02u", tmpNum); } if (needPlace) { printf(", needPlace"); } if (isTmp) { printf(", isTmp"); } if (processed) { printf(", processed"); } if (IsHfaRegArg()) { printf(", isHfa(%s)", varTypeName(GetHfaType())); } if (isBackFilled) { printf(", isBackFilled"); } if (nonStandardArgKind != NonStandardArgKind::None) { printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind)); } if (isStruct) { printf(", isStruct"); } printf("]\n"); } #endif fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs) { compiler = comp; callTree = call; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = 0; #if defined(UNIX_X86_ABI) alignmentDone = false; stkSizeBytes = 0; padStkAlign = 0; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = 0; #endif argTableSize = numArgs; // the allocated table size hasRegArgs = false; hasStackArgs = false; argsComplete = false; argsSorted = false; needsTemps = false; if (argTableSize == 0) { argTable = nullptr; } else { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; } } /***************************************************************************** * * fgArgInfo Copy Constructor * * This method needs to act like a copy constructor for fgArgInfo. * The newCall needs to have its fgArgInfo initialized such that * we have newCall that is an exact copy of the oldCall. * We have to take care since the argument information * in the argTable contains pointers that must point to the * new arguments and not the old arguments. 
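 *
 *  For example (illustrative): if a copied entry's 'use' points at a use in
 *  oldCall's gtCallArgs list, it must be remapped to the corresponding use
 *  in newCall's list; the constructor below walks both argument lists in
 *  parallel to perform that remapping.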
 */
fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall)
{
    fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo;

    compiler = oldArgInfo->compiler;
    callTree = newCall;
    argCount = 0; // filled in arg count, starts at zero
    DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
    nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
    stkLevel            = oldArgInfo->stkLevel;
#if defined(UNIX_X86_ABI)
    alignmentDone = oldArgInfo->alignmentDone;
    stkSizeBytes  = oldArgInfo->stkSizeBytes;
    padStkAlign   = oldArgInfo->padStkAlign;
#endif
#if FEATURE_FIXED_OUT_ARGS
    outArgSize = oldArgInfo->outArgSize;
#endif
    argTableSize = oldArgInfo->argTableSize;
    argsComplete = false;
    argTable     = nullptr;

    assert(oldArgInfo->argsComplete);

    if (argTableSize > 0)
    {
        argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];

        // Copy the old arg entries
        for (unsigned i = 0; i < argTableSize; i++)
        {
            argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]);
        }

        // The copied arg entries contain pointers to old uses; they need
        // to be updated to point to new uses.
        if (newCall->gtCallThisArg != nullptr)
        {
            for (unsigned i = 0; i < argTableSize; i++)
            {
                if (argTable[i]->use == oldCall->gtCallThisArg)
                {
                    argTable[i]->use = newCall->gtCallThisArg;
                    break;
                }
            }
        }

        GenTreeCall::UseIterator newUse    = newCall->Args().begin();
        GenTreeCall::UseIterator newUseEnd = newCall->Args().end();
        GenTreeCall::UseIterator oldUse    = oldCall->Args().begin();
        GenTreeCall::UseIterator oldUseEnd = oldCall->Args().end();

        for (; newUse != newUseEnd; ++newUse, ++oldUse)
        {
            for (unsigned i = 0; i < argTableSize; i++)
            {
                if (argTable[i]->use == oldUse.GetUse())
                {
                    argTable[i]->use = newUse.GetUse();
                    break;
                }
            }
        }

        newUse    = newCall->LateArgs().begin();
        newUseEnd = newCall->LateArgs().end();
        oldUse    = oldCall->LateArgs().begin();
        oldUseEnd = oldCall->LateArgs().end();

        for (; newUse != newUseEnd; ++newUse, ++oldUse)
        {
            for (unsigned i = 0; i < argTableSize; i++)
            {
                if (argTable[i]->lateUse == oldUse.GetUse())
                {
                    argTable[i]->lateUse = newUse.GetUse();
                    break;
                }
            }
        }
    }

    argCount = oldArgInfo->argCount;
    DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;)
    nextStackByteOffset = oldArgInfo->nextStackByteOffset;

    hasRegArgs   = oldArgInfo->hasRegArgs;
    hasStackArgs = oldArgInfo->hasStackArgs;
    argsComplete = true;
    argsSorted   = true;
}

void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry)
{
    assert(argCount < argTableSize);
    argTable[argCount] = curArgTabEntry;
    argCount++;
}

fgArgTabEntry* fgArgInfo::AddRegArg(unsigned          argNum,
                                    GenTree*          node,
                                    GenTreeCall::Use* use,
                                    regNumber         regNum,
                                    unsigned          numRegs,
                                    unsigned          byteSize,
                                    unsigned          byteAlignment,
                                    bool              isStruct,
                                    bool              isFloatHfa,
                                    bool              isVararg /*=false*/)
{
    fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;

    // Any additional register numbers are set by the caller.
    // This is primarily because on ARM we don't yet know if it
    // will be split or if it is a double HFA, so the number of registers
    // may actually be less.
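    // For example, the UNIX_AMD64_ABI overload of AddRegArg below follows up
    // with setRegNum(1, otherRegNum) once it knows the struct is passed in
    // two registers.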
curArgTabEntry->setRegNum(0, regNum); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; curArgTabEntry->numRegs = numRegs; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->slotNum = 0; curArgTabEntry->numSlots = 0; #endif curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(0); hasRegArgs = true; if (argCount >= argTableSize) { fgArgTabEntry** oldTable = argTable; argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1]; memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*)); argTableSize++; } AddArg(curArgTabEntry); return curArgTabEntry; } #if defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr) { fgArgTabEntry* curArgTabEntry = AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg); assert(curArgTabEntry != nullptr); curArgTabEntry->isStruct = isStruct; // is this a struct arg curArgTabEntry->structIntRegs = structIntRegs; curArgTabEntry->structFloatRegs = structFloatRegs; INDEBUG(curArgTabEntry->checkIsStruct();) assert(numRegs <= 2); if (numRegs == 2) { curArgTabEntry->setRegNum(1, otherRegNum); } if (isStruct && structDescPtr != nullptr) { curArgTabEntry->structDesc.CopyFrom(*structDescPtr); } return curArgTabEntry; } #endif // defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); } #endif nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment); DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum); curArgTabEntry->setRegNum(0, REG_STK); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->numSlots = numSlots; curArgTabEntry->slotNum = nextSlotNum; #endif curArgTabEntry->numRegs = 0; #if defined(UNIX_AMD64_ABI) curArgTabEntry->structIntRegs = 0; curArgTabEntry->structFloatRegs = 0; #endif // defined(UNIX_AMD64_ABI) curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = 
        false;
    curArgTabEntry->processed = false;
    if (GlobalJitOptions::compFeatureHfa)
    {
        curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
    }
    curArgTabEntry->isBackFilled       = false;
    curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
    curArgTabEntry->isStruct           = isStruct;
    curArgTabEntry->SetIsVararg(isVararg);

    curArgTabEntry->SetByteAlignment(byteAlignment);
    curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
    curArgTabEntry->SetByteOffset(nextStackByteOffset);

    hasStackArgs = true;
    AddArg(curArgTabEntry);

    DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
    nextStackByteOffset += curArgTabEntry->GetByteSize();

    return curArgTabEntry;
}

void fgArgInfo::RemorphReset()
{
    DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
    nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
}

//------------------------------------------------------------------------
// UpdateRegArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
//    curArgTabEntry - the fgArgTabEntry to update.
//    node           - the tree node that defines the argument
//    reMorphing     - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
//    This must have already been determined to be at least partially passed in registers.
//
void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
    bool isLateArg = curArgTabEntry->isLateArg();
    // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
    assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
           (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));

    assert(curArgTabEntry->numRegs != 0);
    assert(curArgTabEntry->use->GetNode() == node);
}

//------------------------------------------------------------------------
// UpdateStkArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
//    curArgTabEntry - the fgArgTabEntry to update.
//    node           - the tree node that defines the argument
//    reMorphing     - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
//    This must have already been determined to be passed on the stack.
//
void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
    bool isLateArg = curArgTabEntry->isLateArg();
    // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
    assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
           (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));

    noway_assert(curArgTabEntry->use != callTree->gtCallThisArg);
    assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit());
    assert(curArgTabEntry->use->GetNode() == node);
#if defined(DEBUG_ARG_SLOTS)
    if (!compMacOsArm64Abi())
    {
        nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE);
        assert(curArgTabEntry->slotNum == nextSlotNum);
        nextSlotNum += curArgTabEntry->numSlots;
    }
#endif

    nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment());
    assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset);
    nextStackByteOffset += curArgTabEntry->GetStackByteSize();
}

void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots)
{
    fgArgTabEntry* curArgTabEntry = nullptr;
    assert(argNum < argCount);
    for (unsigned inx = 0; inx < argCount; inx++)
    {
        curArgTabEntry = argTable[inx];
        if (curArgTabEntry->argNum == argNum)
        {
            break;
        }
    }

    assert(numRegs > 0);
    assert(numSlots > 0);

    if (argsComplete)
    {
        assert(curArgTabEntry->IsSplit() == true);
        assert(curArgTabEntry->numRegs == numRegs);
        DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);)
        assert(hasStackArgs == true);
    }
    else
    {
        curArgTabEntry->SetSplit(true);
        curArgTabEntry->numRegs = numRegs;
        DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;)
        curArgTabEntry->SetByteOffset(0);
        hasStackArgs = true;
    }
    DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
    // TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size.
    nextStackByteOffset += numSlots * TARGET_POINTER_SIZE;
}

//------------------------------------------------------------------------
// EvalToTmp: Replace the node in the given fgArgTabEntry with a temp
//
// Arguments:
//    curArgTabEntry - the fgArgTabEntry for the argument
//    tmpNum         - the varNum for the temp
//    newNode        - the assignment of the argument value to the temp
//
// Notes:
//    Although the name of this method is EvalToTmp, it doesn't actually create
//    the temp or the copy.
//
void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode)
{
    assert(curArgTabEntry->use != callTree->gtCallThisArg);
    assert(curArgTabEntry->use->GetNode() == newNode);

    assert(curArgTabEntry->GetNode() == newNode);
    curArgTabEntry->tmpNum = tmpNum;
    curArgTabEntry->isTmp  = true;
}

void fgArgInfo::ArgsComplete()
{
    bool hasStructRegArg = false;

    for (unsigned curInx = 0; curInx < argCount; curInx++)
    {
        fgArgTabEntry* curArgTabEntry = argTable[curInx];
        assert(curArgTabEntry != nullptr);
        GenTree* argx = curArgTabEntry->GetNode();

        if (curArgTabEntry->GetRegNum() == REG_STK)
        {
            assert(hasStackArgs == true);
#if !FEATURE_FIXED_OUT_ARGS
            // On x86 we use push instructions to pass arguments:
            //   The non-register arguments are evaluated and pushed in order
            //   and they are never evaluated into temps
            //
            continue;
#endif
        }
#if FEATURE_ARG_SPLIT
        else if (curArgTabEntry->IsSplit())
        {
            hasStructRegArg = true;
            assert(hasStackArgs == true);
        }
#endif       // FEATURE_ARG_SPLIT
        else // we have a register argument, next we look for a struct type.
        {
            if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct))
            {
                hasStructRegArg = true;
            }
        }

        /* If the argument tree contains an assignment (GTF_ASG) then the argument and
           every earlier argument (except constants) must be evaluated into temps
           since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a" -> when we see the second arg "a=5" we know the first two arguments "a, a=5" have to be evaluated into temps For the case of an assignment, we only know that there exist some assignment someplace in the tree. We don't know what is being assigned so we are very conservative here and assume that any local variable could have been assigned. */ if (argx->gtFlags & GTF_ASG) { // If this is not the only argument, or it's a copyblk, or it already evaluates the expression to // a tmp, then we need a temp in the late arg list. if ((argCount > 1) || argx->OperIsCopyBlkOp() #ifdef FEATURE_FIXED_OUT_ARGS || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property // that we only have late non-register args when that feature is on. #endif // FEATURE_FIXED_OUT_ARGS ) { curArgTabEntry->needTmp = true; needsTemps = true; } // For all previous arguments, unless they are a simple constant // we require that they be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); if (!prevArgTabEntry->GetNode()->IsInvariant()) { prevArgTabEntry->needTmp = true; needsTemps = true; } } } bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0); #if FEATURE_FIXED_OUT_ARGS // Like calls, if this argument has a tree that will do an inline throw, // a call to a jit helper, then we need to treat it like a call (but only // if there are/were any stack args). // This means unnesting, sorting, etc. Technically this is overly // conservative, but I want to avoid as much special-case debug-only code // as possible, so leveraging the GTF_CALL flag is the easiest. // if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode && (compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT)) { for (unsigned otherInx = 0; otherInx < argCount; otherInx++) { if (otherInx == curInx) { continue; } if (argTable[otherInx]->GetRegNum() == REG_STK) { treatLikeCall = true; break; } } } #endif // FEATURE_FIXED_OUT_ARGS /* If it contains a call (GTF_CALL) then itself and everything before the call with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT has to be kept in the right order since we will move the call to the first position) For calls we don't have to be quite as conservative as we are with an assignment since the call won't be modifying any non-address taken LclVars. 
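
           For example (illustrative): with ArgTab "x, g(), y", the call g() will be
           hoisted into the gtCallLateArgs list and evaluated first, so any tree in
           "x" with GTF_ALL_EFFECT must be spilled to a temp to preserve the original
           evaluation order.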
         */

        if (treatLikeCall)
        {
            if (argCount > 1) // If this is not the only argument
            {
                curArgTabEntry->needTmp = true;
                needsTemps              = true;
            }
            else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL))
            {
                // Spill all arguments that are floating point calls
                curArgTabEntry->needTmp = true;
                needsTemps              = true;
            }

            // All previous arguments may need to be evaluated into temps
            for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
            {
                fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
                assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);

                // For all previous arguments, if they have any GTF_ALL_EFFECT
                // we require that they be evaluated into a temp
                if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0)
                {
                    prevArgTabEntry->needTmp = true;
                    needsTemps               = true;
                }
#if FEATURE_FIXED_OUT_ARGS
                // Or, if they are stored into the FIXED_OUT_ARG area
                // we require that they be moved to the gtCallLateArgs
                // and replaced with a placeholder node
                else if (prevArgTabEntry->GetRegNum() == REG_STK)
                {
                    prevArgTabEntry->needPlace = true;
                }
#if FEATURE_ARG_SPLIT
                else if (prevArgTabEntry->IsSplit())
                {
                    prevArgTabEntry->needPlace = true;
                }
#endif // FEATURE_ARG_SPLIT
#endif
            }
        }

#if FEATURE_MULTIREG_ARGS
        // For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST
        // with multiple indirections, so here we consider spilling it into a tmp LclVar.
        //
        CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_ARM
        bool isMultiRegArg =
            (curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1);
#else
        bool isMultiRegArg = (curArgTabEntry->numRegs > 1);
#endif

        if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false))
        {
            if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0))
            {
                // Spill multireg struct arguments that have Assignments or Calls embedded in them
                curArgTabEntry->needTmp = true;
                needsTemps              = true;
            }
            else
            {
                // We call gtPrepareCost to measure the cost of evaluating this tree
                compiler->gtPrepareCost(argx);

                if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX)))
                {
                    // Spill multireg struct arguments that are expensive to evaluate twice
                    curArgTabEntry->needTmp = true;
                    needsTemps              = true;
                }
#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
                else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet()))
                {
                    // SIMD types do not need the optimization below due to their sizes
                    if (argx->OperIsSimdOrHWintrinsic() ||
                        (argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) &&
                         argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic()))
                    {
                        curArgTabEntry->needTmp = true;
                        needsTemps              = true;
                    }
                }
#endif
#ifndef TARGET_ARM
                // TODO-Arm: This optimization is not implemented for ARM32
                // so we skip this for ARM32 until it is ported to use RyuJIT backend
                //
                else if (argx->OperGet() == GT_OBJ)
                {
                    GenTreeObj* argObj     = argx->AsObj();
                    unsigned    structSize = argObj->GetLayout()->GetSize();
                    switch (structSize)
                    {
                        case 3:
                        case 5:
                        case 6:
                        case 7:
                            // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes
                            //
                            if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar?
                            {
                                // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes
                                // For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
                                //
                                curArgTabEntry->needTmp = true;
                                needsTemps              = true;
                            }
                            break;
                        case 11:
                        case 13:
                        case 14:
                        case 15:
                            // Spill any GT_OBJ multireg structs that are difficult to extract
                            //
                            // When we have a GT_OBJ of a struct with the above sizes we would need
                            // to use 3 or 4 load instructions to load the exact size of this struct.
                            // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence
                            // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp.
                            // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing
                            // the argument.
                            //
                            curArgTabEntry->needTmp = true;
                            needsTemps              = true;
                            break;

                        default:
                            break;
                    }
                }
#endif // !TARGET_ARM
            }
        }
#endif // FEATURE_MULTIREG_ARGS
    }

    // We only care because we can't spill structs and qmarks involve a lot of spilling, but
    // if we don't have qmarks, then it doesn't matter.
    // So check for Qmark's globally once here, instead of inside the loop.
    //
    const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed);

#if FEATURE_FIXED_OUT_ARGS

    // For Arm/x64 we only care because we can't reorder a register
    // argument that uses GT_LCLHEAP.  This is an optimization to
    // save a check inside the below loop.
    //
    const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed);

#else

    const bool hasStackArgsWeCareAbout = hasStackArgs;

#endif // FEATURE_FIXED_OUT_ARGS

    // If we have any stack args we have to force the evaluation
    // of any arguments passed in registers that might throw an exception
    //
    // Technically we are only required to handle the following two cases:
    //     a GT_IND with GTF_IND_RNGCHK (only on x86) or
    //     a GT_LCLHEAP node that allocates stuff on the stack
    //
    if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout)
    {
        for (unsigned curInx = 0; curInx < argCount; curInx++)
        {
            fgArgTabEntry* curArgTabEntry = argTable[curInx];
            assert(curArgTabEntry != nullptr);
            GenTree* argx = curArgTabEntry->GetNode();

            // Examine the register args that are currently not marked needTmp
            //
            if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK))
            {
                if (hasStackArgsWeCareAbout)
                {
#if !FEATURE_FIXED_OUT_ARGS
                    // On x86 we previously recorded a stack depth of zero when
                    // morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag
                    // Thus we cannot reorder the argument after any stack based argument
                    // (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to
                    //  check for it explicitly.)
                    //
                    if (argx->gtFlags & GTF_EXCEPT)
                    {
                        curArgTabEntry->needTmp = true;
                        needsTemps              = true;
                        continue;
                    }
#else
                    // For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP
                    //
                    if (argx->gtFlags & GTF_EXCEPT)
                    {
                        assert(compiler->compLocallocUsed);

                        // Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree
                        //
                        if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT)
                        {
                            curArgTabEntry->needTmp = true;
                            needsTemps              = true;
                            continue;
                        }
                    }
#endif
                }
                if (hasStructRegArgWeCareAbout)
                {
                    // Returns true if a GT_QMARK node is encountered in the argx tree
                    //
                    if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT)
                    {
                        curArgTabEntry->needTmp = true;
                        needsTemps              = true;
                        continue;
                    }
                }
            }
        }
    }

    // When CFG is enabled and this is a delegate call or vtable call we must
    // compute the call target before all late args. However this will
    // effectively null-check 'this', which should happen only after all
    // arguments are evaluated. Thus we must evaluate all args with side
    // effects to a temp.
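    //
    // For example (illustrative): for CALL(this, f()) dispatched through a
    // vtable, the dispatch sequence emitted before the late args dereferences
    // 'this'; spilling f() to an early temp keeps its side effects ahead of
    // that implicit null check.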
    if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke()))
    {
        // Always evaluate 'this' to temp.
        argTable[0]->needTmp = true;
        needsTemps           = true;

        for (unsigned curInx = 1; curInx < argCount; curInx++)
        {
            fgArgTabEntry* curArgTabEntry = argTable[curInx];
            GenTree*       arg            = curArgTabEntry->GetNode();
            if ((arg->gtFlags & GTF_ALL_EFFECT) != 0)
            {
                curArgTabEntry->needTmp = true;
                needsTemps              = true;
            }
        }
    }

    argsComplete = true;
}

void fgArgInfo::SortArgs()
{
    assert(argsComplete == true);

#ifdef DEBUG
    if (compiler->verbose)
    {
        printf("\nSorting the arguments:\n");
    }
#endif

    /* Shuffle the arguments around before we build the gtCallLateArgs list.
       The idea is to move all "simple" arguments like constants and local vars
       to the end of the table, and move the complex arguments towards the beginning
       of the table. This will help prevent registers from being spilled by allowing
       us to evaluate the more complex arguments before the simpler arguments.
       The argTable ends up looking like:
           +------------------------------------+  <--- argTable[argCount - 1]
           |          constants                 |
           +------------------------------------+
           |    local var / local field         |
           +------------------------------------+
           | remaining arguments sorted by cost |
           +------------------------------------+
           | temps (argTable[].needTmp = true)  |
           +------------------------------------+
           |  args with calls (GTF_CALL)        |
           +------------------------------------+  <--- argTable[0]
     */

    /* Set the beginning and end for the new argument table */
    unsigned curInx;
    int      regCount      = 0;
    unsigned begTab        = 0;
    unsigned endTab        = argCount - 1;
    unsigned argsRemaining = argCount;

    // First take care of arguments that are constants.
    // [We use a backward iterator pattern]
    //
    curInx = argCount;
    do
    {
        curInx--;

        fgArgTabEntry* curArgTabEntry = argTable[curInx];

        if (curArgTabEntry->GetRegNum() != REG_STK)
        {
            regCount++;
        }

        assert(curArgTabEntry->lateUse == nullptr);

        // Skip any already processed args
        //
        if (!curArgTabEntry->processed)
        {
            GenTree* argx = curArgTabEntry->GetNode();

            // put constants at the end of the table
            //
            if (argx->gtOper == GT_CNS_INT)
            {
                noway_assert(curInx <= endTab);

                curArgTabEntry->processed = true;

                // place curArgTabEntry at the endTab position by performing a swap
                //
                if (curInx != endTab)
                {
                    argTable[curInx] = argTable[endTab];
                    argTable[endTab] = curArgTabEntry;
                }

                endTab--;
                argsRemaining--;
            }
        }
    } while (curInx > 0);

    if (argsRemaining > 0)
    {
        // Next take care of arguments that are calls.
        // [We use a forward iterator pattern]
        //
        for (curInx = begTab; curInx <= endTab; curInx++)
        {
            fgArgTabEntry* curArgTabEntry = argTable[curInx];

            // Skip any already processed args
            //
            if (!curArgTabEntry->processed)
            {
                GenTree* argx = curArgTabEntry->GetNode();

                // put calls at the beginning of the table
                //
                if (argx->gtFlags & GTF_CALL)
                {
                    curArgTabEntry->processed = true;

                    // place curArgTabEntry at the begTab position by performing a swap
                    //
                    if (curInx != begTab)
                    {
                        argTable[curInx] = argTable[begTab];
                        argTable[begTab] = curArgTabEntry;
                    }

                    begTab++;
                    argsRemaining--;
                }
            }
        }
    }

    if (argsRemaining > 0)
    {
        // Next take care of arguments that are temps.
        // These temps come before the arguments that are
        // ordinary local vars or local fields
        // since this will give them a better chance to become
        // enregistered into their actual argument register.
// [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { if (curArgTabEntry->needTmp) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of local var and local field arguments. // These are moved towards the end of the argument evaluation. // [We use a backward iterator pattern] // curInx = endTab + 1; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD)) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > begTab); } // Finally, take care of all the remaining arguments. // Note that we fill in one arg at a time using a while loop. bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop while (argsRemaining > 0) { /* Find the most expensive arg remaining and evaluate it next */ fgArgTabEntry* expensiveArgTabEntry = nullptr; unsigned expensiveArg = UINT_MAX; unsigned expensiveArgCost = 0; // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // We should have already handled these kinds of args assert(argx->gtOper != GT_LCL_VAR); assert(argx->gtOper != GT_LCL_FLD); assert(argx->gtOper != GT_CNS_INT); // This arg should either have no persistent side effects or be the last one in our table // assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1))); if (argsRemaining == 1) { // This is the last arg to place expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; assert(begTab == endTab); break; } else { if (!costsPrepared) { /* We call gtPrepareCost to measure the cost of evaluating this tree */ compiler->gtPrepareCost(argx); } if (argx->GetCostEx() > expensiveArgCost) { // Remember this arg as the most expensive one that we have yet seen expensiveArgCost = argx->GetCostEx(); expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; } } } } noway_assert(expensiveArg != UINT_MAX); // put the most expensive arg towards the beginning of the table expensiveArgTabEntry->processed = true; // place expensiveArgTabEntry at the begTab position by performing a swap // if (expensiveArg != begTab) { argTable[expensiveArg] = argTable[begTab]; argTable[begTab] = expensiveArgTabEntry; } begTab++; argsRemaining--; costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop } // The table should now be completely filled and thus begTab should now be adjacent to endTab // and regArgsRemaining should be zero assert(begTab == (endTab + 1)); assert(argsRemaining == 0); argsSorted = true; } #ifdef DEBUG void fgArgInfo::Dump(Compiler* compiler) const { for (unsigned curInx = 0; curInx 
< ArgCount(); curInx++) { fgArgTabEntry* curArgEntry = ArgTable()[curInx]; curArgEntry->Dump(); } } #endif //------------------------------------------------------------------------------ // fgMakeTmpArgNode : This function creates a tmp var only if needed. // We need this to be done in order to enforce ordering // of the evaluation of arguments. // // Arguments: // curArgTabEntry // // Return Value: // the newly created temp var tree. GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) { unsigned tmpVarNum = curArgTabEntry->tmpNum; LclVarDsc* varDsc = lvaGetDesc(tmpVarNum); assert(varDsc->lvIsTemp); var_types type = varDsc->TypeGet(); // Create a copy of the temp to go into the late argument list GenTree* arg = gtNewLclvNode(tmpVarNum, type); GenTree* addrNode = nullptr; if (varTypeIsStruct(type)) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Can this type be passed as a primitive type? // If so, the following call will return the corresponding primitive type. // Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type. bool passedAsPrimitive = false; if (curArgTabEntry->TryPassAsPrimitive()) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); var_types structBaseType = getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg()); if (structBaseType != TYP_UNKNOWN) { passedAsPrimitive = true; #if defined(UNIX_AMD64_ABI) // TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry, // and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take // a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again. // if (genIsValidFloatReg(curArgTabEntry->GetRegNum())) { if (structBaseType == TYP_INT) { structBaseType = TYP_FLOAT; } else { assert(structBaseType == TYP_LONG); structBaseType = TYP_DOUBLE; } } #endif type = structBaseType; } } // If it is passed in registers, don't get the address of the var. Make it a // field instead. It will be loaded in registers with putarg_reg tree in lower. if (passedAsPrimitive) { arg->ChangeOper(GT_LCL_FLD); arg->gtType = type; lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); } else { var_types addrType = TYP_BYREF; arg = gtNewOperNode(GT_ADDR, addrType, arg); lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); addrNode = arg; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 assert(varTypeIsStruct(type)); if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg())) { // We will create a GT_OBJ for the argument below. // This will be passed by value in two registers. assert(addrNode != nullptr); // Create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); } #else // Always create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); #endif // !TARGET_ARM64 #endif // FEATURE_MULTIREG_ARGS } #else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) // other targets, we pass the struct by value assert(varTypeIsStruct(type)); addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg); // Get a new Obj node temp to use it as a call argument. // gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object. 
        arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode);

#endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM)

    } // (varTypeIsStruct(type))

    if (addrNode != nullptr)
    {
        assert(addrNode->gtOper == GT_ADDR);

        // the child of a GT_ADDR is required to have this flag set
        addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE;
    }

    return arg;
}

//------------------------------------------------------------------------------
// EvalArgsToTemps : Create temp assignments and populate the LateArgs list.

void fgArgInfo::EvalArgsToTemps()
{
    assert(argsSorted);

    unsigned regArgInx = 0;
    // Now go through the argument table and perform the necessary evaluation into temps
    GenTreeCall::Use* tmpRegArgNext = nullptr;
    for (unsigned curInx = 0; curInx < argCount; curInx++)
    {
        fgArgTabEntry* curArgTabEntry = argTable[curInx];

        assert(curArgTabEntry->lateUse == nullptr);

        GenTree* argx     = curArgTabEntry->GetNode();
        GenTree* setupArg = nullptr;
        GenTree* defArg;

#if !FEATURE_FIXED_OUT_ARGS
        // Only ever set for FEATURE_FIXED_OUT_ARGS
        assert(curArgTabEntry->needPlace == false);

        // On x86 and other archs that use push instructions to pass arguments:
        //   Only the register arguments need to be replaced with placeholder nodes.
        //   Stacked arguments are evaluated and pushed (or stored into the stack) in order.
        //
        if (curArgTabEntry->GetRegNum() == REG_STK)
            continue;
#endif

        if (curArgTabEntry->needTmp)
        {
            if (curArgTabEntry->isTmp)
            {
                // Create a copy of the temp to go into the late argument list
                defArg = compiler->fgMakeTmpArgNode(curArgTabEntry);

                // mark the original node as a late argument
                argx->gtFlags |= GTF_LATE_ARG;
            }
            else
            {
                // Create a temp assignment for the argument
                // Put the temp in the gtCallLateArgs list
                CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef DEBUG
                if (compiler->verbose)
                {
                    printf("Argument with 'side effect'...\n");
                    compiler->gtDispTree(argx);
                }
#endif

#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
                noway_assert(argx->gtType != TYP_STRUCT);
#endif

                unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect"));
                if (argx->gtOper == GT_MKREFANY)
                {
                    // For GT_MKREFANY, typically the actual struct copying does
                    // not have any side-effects and can be delayed. So instead
                    // of using a temp for the whole struct, we can just use a temp
                    // for the operand that has a side-effect.
                    GenTree* operand;
                    if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0)
                    {
                        operand = argx->AsOp()->gtOp1;

                        // In the early argument evaluation, place an assignment to the temp
                        // from the source operand of the mkrefany
                        setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);

                        // Replace the operand for the mkrefany with the new temp.
                        argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
                    }
                    else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0)
                    {
                        operand = argx->AsOp()->gtOp2;

                        // In the early argument evaluation, place an assignment to the temp
                        // from the source operand of the mkrefany
                        setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);

                        // Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } } if (setupArg != nullptr) { // Now keep the mkrefany for the late argument list defArg = argx; // Clear the side-effect flags because now both op1 and op2 have no side-effects defArg->gtFlags &= ~GTF_ALL_EFFECT; } else { setupArg = compiler->gtNewTempAssign(tmpVarNum, argx); LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum); var_types lclVarType = genActualType(argx->gtType); var_types scalarType = TYP_UNKNOWN; if (setupArg->OperIsCopyBlkOp()) { setupArg = compiler->fgMorphCopyBlock(setupArg); #if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) if (lclVarType == TYP_STRUCT) { // This scalar LclVar widening step is only performed for ARM architectures. // CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum); unsigned structSize = varDsc->lvExactSize; scalarType = compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg()); } #endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) } // scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 => // 8) if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType)) { // Create a GT_LCL_FLD using the wider type to go to the late argument list defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0); } else { // Create a copy of the temp to go to the late argument list defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType); } curArgTabEntry->isTmp = true; curArgTabEntry->tmpNum = tmpVarNum; #ifdef TARGET_ARM // Previously we might have thought the local was promoted, and thus the 'COPYBLK' // might have left holes in the used registers (see // fgAddSkippedRegsInPromotedStructArg). // Too bad we're not that smart for these intermediate temps... if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1)) { regNumber argReg = curArgTabEntry->GetRegNum(); regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum()); for (unsigned i = 1; i < curArgTabEntry->numRegs; i++) { argReg = genRegArgNext(argReg); allUsedRegs |= genRegMask(argReg); } } #endif // TARGET_ARM } /* mark the assignment as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { printf("\n Evaluate to a temp:\n"); compiler->gtDispTree(setupArg); } #endif } } else // curArgTabEntry->needTmp == false { // On x86 - // Only register args are replaced with placeholder nodes // and the stack based arguments are evaluated and pushed in order. // // On Arm/x64 - When needTmp is false and needPlace is false, // the non-register arguments are evaluated and stored in order. // When needPlace is true we have a nested call that comes after // this argument so we have to replace it in the gtCallArgs list // (the initial argument evaluation list) with a placeholder. // if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false)) { continue; } /* No temp needed - move the whole node to the gtCallLateArgs list */ /* The argument is deferred and put in the late argument list */ defArg = argx; // Create a placeholder node to put in its place in gtCallLateArgs. // For a struct type we also need to record the class handle of the arg. CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // All structs are either passed (and retyped) as integral types, OR they // are passed by reference. 
noway_assert(argx->gtType != TYP_STRUCT); #else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) if (defArg->TypeGet() == TYP_STRUCT) { clsHnd = compiler->gtGetStructHandleIfPresent(defArg); noway_assert(clsHnd != NO_CLASS_HANDLE); } #endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd); /* mark the placeholder node as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { if (curArgTabEntry->GetRegNum() == REG_STK) { printf("Deferred stack argument :\n"); } else { printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum())); } compiler->gtDispTree(argx); printf("Replaced with placeholder node:\n"); compiler->gtDispTree(setupArg); } #endif } if (setupArg != nullptr) { noway_assert(curArgTabEntry->use->GetNode() == argx); curArgTabEntry->use->SetNode(setupArg); } /* deferred arg goes into the late argument list */ if (tmpRegArgNext == nullptr) { tmpRegArgNext = compiler->gtNewCallArgs(defArg); callTree->AsCall()->gtCallLateArgs = tmpRegArgNext; } else { noway_assert(tmpRegArgNext->GetNode() != nullptr); tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg)); tmpRegArgNext = tmpRegArgNext->GetNext(); } curArgTabEntry->lateUse = tmpRegArgNext; curArgTabEntry->SetLateArgInx(regArgInx++); } #ifdef DEBUG if (compiler->verbose) { printf("\nShuffled argument table: "); for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { printf("%s ", getRegName(curArgTabEntry->GetRegNum())); } } printf("\n"); } #endif } //------------------------------------------------------------------------------ // fgMakeMultiUse : If the node is an unaliased local or constant clone it, // otherwise insert a comma form temp // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // // Notes: // Caller must ensure that if the node is an unaliased local, the second use this // creates will be evaluated before the local can be reassigned. // // Can be safely called in morph preorder, before GTF_GLOB_REF is reliable. // GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) { GenTree* const tree = *pOp; if (tree->IsInvariant()) { return gtClone(tree); } else if (tree->IsLocal()) { // Can't rely on GTF_GLOB_REF here. // if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed()) { return gtClone(tree); } } return fgInsertCommaFormTemp(pOp); } //------------------------------------------------------------------------------ // fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree, // and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl) // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // structType - value type handle if the temp created is of TYP_STRUCT. 
// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/) { GenTree* subTree = *ppTree; unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable")); if (varTypeIsStruct(subTree)) { assert(structType != nullptr); lvaSetStruct(lclNum, structType, false); } // If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree. // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for // setting type of lcl vars created. GenTree* asg = gtNewTempAssign(lclNum, subTree); GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load); *ppTree = comma; return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); } //------------------------------------------------------------------------ // fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg // // Arguments: // callNode - the call for which we are generating the fgArgInfo // // Return Value: // None // // Notes: // This method is idempotent in that it checks whether the fgArgInfo has already been // constructed, and just returns. // This method only computes the arg table and arg entries for the call (the fgArgInfo), // and makes no modification of the args themselves. // // The IR for the call args can change for calls with non-standard arguments: some non-standard // arguments add new call argument IR nodes. // void Compiler::fgInitArgInfo(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; unsigned argIndex = 0; unsigned intArgRegNum = 0; unsigned fltArgRegNum = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool callHasRetBuffArg = call->HasRetBufArg(); bool callIsVararg = call->IsVarargs(); #ifdef TARGET_ARM regMaskTP argSkippedRegMask = RBM_NONE; regMaskTP fltArgSkippedRegMask = RBM_NONE; #endif // TARGET_ARM #if defined(TARGET_X86) unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated #else const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number #endif if (call->fgArgInfo != nullptr) { // We've already initialized and set the fgArgInfo. return; } JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); if (TargetOS::IsUnix && callIsVararg) { // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. NYI("Morphing Vararg call not yet implemented on non Windows targets."); } // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing // arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the // non-standard arguments into the argument list, below. class NonStandardArgs { struct NonStandardArg { GenTree* node; // The tree node representing this non-standard argument. 
                                 // Note that this must be updated if the tree node changes due to morphing!
        regNumber          reg;  // The register to be assigned to this non-standard argument.
        NonStandardArgKind kind; // The kind of the non-standard arg
    };

    ArrayStack<NonStandardArg> args;

public:
    NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments
    {
    }

    //-----------------------------------------------------------------------------
    // Add: add a non-standard argument to the table of non-standard arguments
    //
    // Arguments:
    //    node - a GenTree node that has a non-standard argument.
    //    reg  - the register to assign to this node.
    //    kind - the kind of this non-standard argument.
    //
    // Return Value:
    //    None.
    //
    void Add(GenTree* node, regNumber reg, NonStandardArgKind kind)
    {
        NonStandardArg nsa = {node, reg, kind};
        args.Push(nsa);
    }

    //-----------------------------------------------------------------------------
    // Find: Look for a GenTree* in the set of non-standard args.
    //
    // Arguments:
    //    node - a GenTree node to look for
    //
    // Return Value:
    //    The index of the non-standard argument (a non-negative, unique, stable number).
    //    If the node is not a non-standard argument, return -1.
    //
    int Find(GenTree* node)
    {
        for (int i = 0; i < args.Height(); i++)
        {
            if (node == args.Top(i).node)
            {
                return i;
            }
        }
        return -1;
    }

    //-----------------------------------------------------------------------------
    // Find: Look for a GenTree node in the non-standard arguments set. If found,
    // set the register to use for the node.
    //
    // Arguments:
    //    node  - a GenTree node to look for
    //    pReg  - an OUT argument. *pReg is set to the non-standard register to use if
    //            'node' is found in the non-standard argument set.
    //    pKind - an OUT argument. *pKind is set to the kind of the non-standard arg.
    //
    // Return Value:
    //    'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set.
    //    'false' otherwise (in this case, *pReg and *pKind are unmodified).
    //
    bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind)
    {
        for (int i = 0; i < args.Height(); i++)
        {
            NonStandardArg& nsa = args.TopRef(i);
            if (node == nsa.node)
            {
                *pReg  = nsa.reg;
                *pKind = nsa.kind;
                return true;
            }
        }
        return false;
    }

    //-----------------------------------------------------------------------------
    // Replace: Replace the non-standard argument node at a given index. This is done when
    // the original node was replaced via morphing, but we need to continue to assign a
    // particular non-standard arg to it.
    //
    // Arguments:
    //    index - the index of the non-standard arg. It must exist.
    //    node  - the new GenTree node.
    //
    // Return Value:
    //    None.
    //
    void Replace(int index, GenTree* node)
    {
        args.TopRef(index).node = node;
    }

} nonStandardArgs(getAllocator(CMK_ArrayStack));

    // Count of args. On first morph, this is counted before we've filled in the arg table.
    // On remorph, we grab it from the arg table.
    unsigned numArgs = 0;

    // First we need to count the args
    if (call->gtCallThisArg != nullptr)
    {
        numArgs++;
    }
    for (GenTreeCall::Use& use : call->Args())
    {
        numArgs++;
    }

    // Insert or mark non-standard args. These are either outside the normal calling convention, or
    // argument registers that don't follow the normal progression of argument registers in the calling
    // convention (such as for the ARM64 fixed return buffer argument x8).
    //
    // *********** NOTE *************
    // The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments
    // in the implementation of fast tail call.
// *********** END NOTE ********* CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) // The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers has a custom calling convention. // Set the argument registers correctly here. if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame); } #endif // defined(TARGET_X86) || defined(TARGET_ARM) #if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper // delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing // R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs // to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4 // correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub) // to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details. else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV) { GenTree* arg = call->gtCallThisArg->GetNode(); if (arg->OperIsLocal()) { arg = gtClone(arg, true); } else { GenTree* tmp = fgInsertCommaFormTemp(&arg); call->gtCallThisArg->SetNode(arg); call->gtFlags |= GTF_ASG; arg = tmp; } noway_assert(arg != nullptr); GenTree* newArg = new (this, GT_ADDR) GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell); // Append newArg as the last arg GenTreeCall::Use** insertionPoint = &call->gtCallArgs; for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef())) { } *insertionPoint = gtNewCallArgs(newArg); numArgs++; nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell); } #endif // defined(TARGET_ARM) #if defined(TARGET_X86) // The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the // hi part to be in EDX. This sets the argument registers up correctly. else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || call->IsHelperCall(this, CORINFO_HELP_LRSZ)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow); args = args->GetNext(); GenTree* arg2 = args->GetNode(); assert(arg2 != nullptr); nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh); } #else // !TARGET_X86 // TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed. // If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling // convention for x86/SSE. // If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it. // // We don't use the fixed return buffer argument if we have the special unmanaged instance call convention. // That convention doesn't use the fixed return buffer register. // CLANG_FORMAT_COMMENT_ANCHOR; if (call->HasFixedRetBufArg()) { args = call->gtCallArgs; assert(args != nullptr); argx = call->gtCallArgs->GetNode(); // We don't increment numArgs here, since we already counted this argument above. 
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer); } // We are allowed to have a Fixed Return Buffer argument combined // with any of the remaining non-standard arguments // CLANG_FORMAT_COMMENT_ANCHOR; if (call->IsVirtualStub()) { if (!call->IsTailCallViaJitHelper()) { GenTree* stubAddrArg = fgGetStubAddrArg(call); // And push the stub address onto the list of arguments call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell); } else { // If it is a VSD call getting dispatched via tail call helper, // fgMorphTailCallViaJitHelper() would materialize stub addr as an additional // parameter added to the original arg list and hence no need to // add as a non-standard arg. } } else #endif // !TARGET_X86 if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr)) { assert(!call->IsUnmanaged()); GenTree* arg = call->gtCallCookie; noway_assert(arg != nullptr); call->gtCallCookie = nullptr; // All architectures pass the cookie in a register. call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie); numArgs++; // put destination into R10/EAX arg = gtClone(call->gtCallAddr, true); call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget); // finally change this call to a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI); } #if defined(FEATURE_READYTORUN) // For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg() // for indirection cell address, which ZapIndirectHelperThunk expects. // For x64/x86 we use return address to get the indirection cell by disassembling the call site. // That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch. // Note that we call this before we know if something will be a fast tailcall or not. // That's ok; after making something a tailcall, we will invalidate this information // and reconstruct it if necessary. The tailcalling decision does not change since // this is a non-standard arg in a register. bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke(); #if defined(TARGET_XARCH) needsIndirectionCell &= call->IsFastTailCall(); #endif if (needsIndirectionCell) { assert(call->gtEntryPoint.addr != nullptr); size_t addrValue = (size_t)call->gtEntryPoint.addr; GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR); #ifdef DEBUG indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM); #ifdef TARGET_ARM // Issue #xxxx : Don't attempt to CSE this constant on ARM32 // // This constant has specific register requirements, and LSRA doesn't currently correctly // handle them when the value is in a CSE'd local. indirectCellAddress->SetDoNotCSE(); #endif // TARGET_ARM // Push the stub address onto the list of arguments. 
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs); numArgs++; nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(), NonStandardArgKind::R2RIndirectionCell); } #endif if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { assert(call->gtCallArgs != nullptr); GenTreeCall::Use* args = call->gtCallArgs; GenTree* tar = args->GetNode(); nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget); } // Allocate the fgArgInfo for the call node; // call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs); // Add the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); assert(argIndex == 0); assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT); assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL)); const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum); const unsigned numRegs = 1; const unsigned byteSize = TARGET_POINTER_SIZE; const unsigned byteAlignment = TARGET_POINTER_SIZE; const bool isStruct = false; const bool isFloatHfa = false; // This is a register argument - put it in the table. call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); intArgRegNum++; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument fltArgRegNum++; #endif // WINDOWS_AMD64_ABI argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } #ifdef TARGET_X86 // Compute the maximum number of arguments that can be passed in registers. // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) { noway_assert(intArgRegNum < MAX_REG_ARG); // No more register arguments for varargs (CALL_POP_ARGS) maxRegArgs = intArgRegNum; // Add in the ret buff arg if (callHasRetBuffArg) maxRegArgs++; } #endif // UNIX_X86_ABI if (call->IsUnmanaged()) { noway_assert(intArgRegNum == 0); if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL || call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF || call->gtCallArgs->GetNode()->gtOper == GT_NOP); // the arg was already morphed to a register (fgMorph called twice) maxRegArgs = 1; } else { maxRegArgs = 0; } #ifdef UNIX_X86_ABI // Add in the ret buff arg if (callHasRetBuffArg && call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments. maxRegArgs++; #endif } #endif // TARGET_X86 /* Morph the user arguments */ CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) // The ARM ABI has a concept of back-filling of floating-point argument registers, according // to the "Procedure Call Standard for the ARM Architecture" document, especially // section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can // appear in a lower-numbered register than floating point argument N. That is, argument // register allocation is not strictly increasing. To support this, we need to keep track of unused // floating-point argument registers that we can back-fill. 
We only support 4-byte float and
    // 8-byte double types, and one to four element HFAs composed of these types. With this, we will
    // only back-fill single registers, since there is no way with these types to create
    // an alignment hole greater than one register. However, there can be up to 3 back-fill slots
    // available (with 16 FP argument registers). Consider this code:
    //
    // struct HFA { float x, y, z; }; // a three element HFA
    // void bar(float a1,   // passed in f0
    //          double a2,  // passed in f2/f3; skip f1 for alignment
    //          HFA a3,     // passed in f4/f5/f6
    //          double a4,  // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot
    //          HFA a5,     // passed in f10/f11/f12
    //          double a6,  // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill
    //                      // slots
    //          float a7,   // passed in f1 (back-filled)
    //          float a8,   // passed in f7 (back-filled)
    //          float a9,   // passed in f13 (back-filled)
    //          float a10)  // passed on the stack in [OutArg+0]
    //
    // Note that if we ever support FP types with larger alignment requirements, then there could
    // be more than single register back-fills.
    //
    // Once we assign a floating-point argument to the stack, all subsequent floating-point arguments
    // must also go on the stack. See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3,
    // "The back-filling continues only so long as no VFP CPRC has been allocated to a slot on the stack."
    // We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack
    // and prevent any additional floating-point arguments from going in registers.

    bool anyFloatStackArgs = false;

#endif // TARGET_ARM

#ifdef UNIX_AMD64_ABI
    SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // UNIX_AMD64_ABI

#if defined(DEBUG)
    // Check that we have valid information about call's argument types.
    // For example:
    // load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte));
    // load int; call(byte) -> CALL(PUTARG_TYPE int (IND int));
    // etc.
    if (call->callSig != nullptr)
    {
        CORINFO_SIG_INFO* sig          = call->callSig;
        const unsigned    sigArgsCount = sig->numArgs;

        GenTreeCall::Use* nodeArgs = call->gtCallArgs;
        // It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie
        // etc.
        unsigned nodeArgsCount = 0;
        call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult {
            nodeArgsCount++;
            return GenTree::VisitResult::Continue;
        });

        if (call->gtCallThisArg != nullptr)
        {
            // Handle the most common argument that is not included in `sig->numArgs`,
            // so the following check works on more methods.
nodeArgsCount--; } assert(nodeArgsCount >= sigArgsCount); if ((nodeArgsCount == sigArgsCount) && ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1))) { CORINFO_ARG_LIST_HANDLE sigArg = sig->args; for (unsigned i = 0; i < sig->numArgs; ++i) { CORINFO_CLASS_HANDLE argClass; const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); const var_types sigType = JITtype2varType(corType); assert(nodeArgs != nullptr); const GenTree* nodeArg = nodeArgs->GetNode(); assert(nodeArg != nullptr); const var_types nodeType = nodeArg->TypeGet(); assert((nodeType == sigType) || varTypeIsStruct(sigType) || genTypeSize(nodeType) == genTypeSize(sigType)); sigArg = info.compCompHnd->getArgNext(sigArg); nodeArgs = nodeArgs->GetNext(); } assert(nodeArgs == nullptr); } } #endif // DEBUG for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { argx = args->GetNode()->gtSkipPutArgType(); // Change the node to TYP_I_IMPL so we don't report GC info // NOTE: We deferred this from the importer because of the inliner. if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // We should never have any ArgPlaceHolder nodes at this point. assert(!argx->IsArgPlaceHolderNode()); // Setup any HFA information about 'argx' bool isHfaArg = false; var_types hfaType = TYP_UNDEF; unsigned hfaSlots = 0; bool passUsingFloatRegs; unsigned argAlignBytes = TARGET_POINTER_SIZE; unsigned size = 0; unsigned byteSize = 0; if (GlobalJitOptions::compFeatureHfa) { hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { // Make sure for vararg methods isHfaArg is not true. isHfaArg = callIsVararg ? false : isHfaArg; } #endif // defined(TARGET_ARM64) if (isHfaArg) { isHfaArg = true; hfaSlots = GetHfaCount(argx); // If we have a HFA struct it's possible we transition from a method that originally // only had integer types to now start having FP types. We have to communicate this // through this flag since LSRA later on will use this flag to determine whether // or not to track the FP register set. // compFloatingPointUsed = true; } } const bool isFloatHfa = (hfaType == TYP_FLOAT); #ifdef TARGET_ARM passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP; bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG); // We don't use the "size" return value from InferOpSizeAlign(). 
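        // (Illustration of the alignment handling below; register numbers are examples
        // only: an 8-byte-aligned argument that arrives when r1 is the next free
        // integer register skips r1, recording it in argSkippedRegMask, and is passed
        // in r2/r3 instead. Float registers get the same treatment via
        // fltArgSkippedRegMask, and those skipped slots may be back-filled later.)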
        codeGen->InferOpSizeAlign(argx, &argAlignBytes);

        argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE);

        if (argAlignBytes == 2 * TARGET_POINTER_SIZE)
        {
            if (passUsingFloatRegs)
            {
                if (fltArgRegNum % 2 == 1)
                {
                    fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
                    fltArgRegNum++;
                }
            }
            else if (passUsingIntRegs)
            {
                if (intArgRegNum % 2 == 1)
                {
                    argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
                    intArgRegNum++;
                }
            }

#if defined(DEBUG)
            if (argSlots % 2 == 1)
            {
                argSlots++;
            }
#endif
        }

#elif defined(TARGET_ARM64)

        assert(!callIsVararg || !isHfaArg);
        passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx));

#elif defined(TARGET_AMD64)

        passUsingFloatRegs = varTypeIsFloating(argx);

#elif defined(TARGET_X86)

        passUsingFloatRegs = false;

#else
#error Unsupported or unset target architecture
#endif // TARGET*

        bool      isBackFilled     = false;
        unsigned  nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
        var_types structBaseType   = TYP_STRUCT;
        unsigned  structSize       = 0;
        bool      passStructByRef  = false;

        bool     isStructArg;
        GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */);

        //
        // Figure out the size of the argument. This is either in number of registers, or number of
        // TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and
        // the stack.
        //
        isStructArg                   = varTypeIsStruct(argx);
        CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE;
        if (isStructArg)
        {
            objClass = gtGetStructHandle(argx);
            if (argx->TypeGet() == TYP_STRUCT)
            {
                // For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY
                switch (actualArg->OperGet())
                {
                    case GT_OBJ:
                        structSize = actualArg->AsObj()->GetLayout()->GetSize();
                        assert(structSize == info.compCompHnd->getClassSize(objClass));
                        break;
                    case GT_LCL_VAR:
                        structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize;
                        break;
                    case GT_MKREFANY:
                        structSize = info.compCompHnd->getClassSize(objClass);
                        break;
                    default:
                        BADCODE("illegal argument tree in fgInitArgInfo");
                        break;
                }
            }
            else
            {
                structSize = genTypeSize(argx);
                assert(structSize == info.compCompHnd->getClassSize(objClass));
            }
        }
#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
        if (!isStructArg)
        {
            size     = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
            byteSize = genTypeSize(argx);
        }
        else
        {
            size     = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
            byteSize = structSize;
            eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
        }
#else  // !UNIX_AMD64_ABI
        size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot'
        if (!isStructArg)
        {
            byteSize = genTypeSize(argx);
        }
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64)
        if (isStructArg)
        {
            if (isHfaArg)
            {
                // HFA structs are passed by value in multiple registers.
                // The "size" in registers may differ from the size in pointer-sized units.
                CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx);
                size                           = GetHfaCount(structHnd);
                byteSize                       = info.compCompHnd->getClassSize(structHnd);
            }
            else
            {
                // Structs are either passed in 1 or 2 (64-bit) slots.
                // Structs that are the size of 2 pointers are passed by value in multiple registers,
                // if sufficient registers are available.
                // Structs that are larger than 2 pointers (except for HFAs) are passed by
                // reference (to a copy).
                size     = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
                byteSize = structSize;
                if (size > 2)
                {
                    size = 1;
                }
            }
            // Note that there are some additional rules for multireg structs.
            // (i.e. they cannot be split between registers and the stack)
        }
        else
        {
            size     = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot'
            byteSize = genTypeSize(argx);
        }
#elif defined(TARGET_ARM) || defined(TARGET_X86)
        if (isStructArg)
        {
            size     = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
            byteSize = structSize;
        }
        else
        {
            // The typical case.
            // Long/double type argument(s) will be modified as needed in Lowering.
            size     = genTypeStSz(argx->gtType);
            byteSize = genTypeSize(argx);
        }
#else
#error Unsupported or unset target architecture
#endif // TARGET_XXX

        if (isStructArg)
        {
            assert(argx == args->GetNode());
            assert(structSize != 0);

            structPassingKind howToPassStruct;
            structBaseType  = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize);
            passStructByRef = (howToPassStruct == SPK_ByReference);
            if (howToPassStruct == SPK_ByReference)
            {
                byteSize = TARGET_POINTER_SIZE;
            }
            else
            {
                byteSize = structSize;
            }

            if (howToPassStruct == SPK_PrimitiveType)
            {
#ifdef TARGET_ARM
                // TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct,
                // or for a struct of two floats. This causes the struct to be address-taken.
                if (structBaseType == TYP_DOUBLE)
                {
                    size = 2;
                }
                else
#endif // TARGET_ARM
                {
                    size = 1;
                }
            }
            else if (passStructByRef)
            {
                size = 1;
            }
        }

        const var_types argType = args->GetNode()->TypeGet();
        if (args->GetNode()->OperIs(GT_PUTARG_TYPE))
        {
            byteSize = genTypeSize(argType);
        }

        // The 'size' value must have been set by now (the original value of zero is an invalid value).
        assert(size != 0);
        assert(byteSize != 0);

        if (compMacOsArm64Abi())
        {
            // Arm64 Apple has a special ABI for passing small-sized arguments on the stack:
            // bytes are aligned to 1 byte, shorts to 2 bytes, int/float to 4 bytes, etc.
            // It means passing 8 1-byte arguments on the stack can take as little as 8 bytes.
            argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa);
        }

        //
        // Figure out if the argument will be passed in a register.
        //
        bool               isRegArg           = false;
        NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None;
        regNumber          nonStdRegNum       = REG_NA;

        if (isRegParamType(genActualType(argx->TypeGet()))
#ifdef UNIX_AMD64_ABI
            && (!isStructArg || structDesc.passedInRegisters)
#elif defined(TARGET_X86)
            || (isStructArg && isTrivialPointerSizedStruct(objClass))
#endif
                )
        {
#ifdef TARGET_ARM
            if (passUsingFloatRegs)
            {
                // First, see if it can be back-filled
                if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
                    (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
                    (size == 1))                          // The size to back-fill is one float register
                {
                    // Back-fill the register.
                    isBackFilled              = true;
                    regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
                    fltArgSkippedRegMask &=
                        ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
                    nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
                    assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG);
                }

                // Does the entire float, double, or HFA fit in the FP arg registers?
                // Check if the last register needed is still in the argument register range.
                isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG;

                if (!isRegArg)
                {
                    anyFloatStackArgs = true;
                }
            }
            else
            {
                isRegArg = intArgRegNum < MAX_REG_ARG;
            }
#elif defined(TARGET_ARM64)
            if (passUsingFloatRegs)
            {
                // Check if the last register needed is still in the fp argument register range.
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG; // Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers? if (isHfaArg && !isRegArg) { // recompute the 'size' so that it represent the number of stack slots rather than the number of // registers // unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE); size = roundupSize / TARGET_POINTER_SIZE; // We also must update fltArgRegNum so that we no longer try to // allocate any new floating point registers for args // This prevents us from backfilling a subsequent arg into d7 // fltArgRegNum = MAX_FLOAT_REG_ARG; } } else { // Check if the last register needed is still in the int argument register range. isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; // Did we run out of registers when we had a 16-byte struct (size===2) ? // (i.e we only have one register remaining but we needed two registers to pass this arg) // This prevents us from backfilling a subsequent arg into x7 // if (!isRegArg && (size > 1)) { // Arm64 windows native varargs allows splitting a 16 byte struct between stack // and the last general purpose register. if (TargetOS::IsWindows && callIsVararg) { // Override the decision and force a split. isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs; } else { // We also must update intArgRegNum so that we no longer try to // allocate any new general purpose registers for args // intArgRegNum = maxRegArgs; } } } #else // not TARGET_ARM or TARGET_ARM64 #if defined(UNIX_AMD64_ABI) // Here a struct can be passed in register following the classifications of its members and size. // Now make sure there are actually enough registers to do so. if (isStructArg) { unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { structIntRegs++; } else if (structDesc.IsSseSlot(i)) { structFloatRegs++; } } isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) && ((intArgRegNum + structIntRegs) <= MAX_REG_ARG); } else { if (passUsingFloatRegs) { isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG; } else { isRegArg = intArgRegNum < MAX_REG_ARG; } } #else // !defined(UNIX_AMD64_ABI) isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs; #endif // !defined(UNIX_AMD64_ABI) #endif // TARGET_ARM } else { isRegArg = false; } // If there are nonstandard args (outside the calling convention) they were inserted above // and noted them in a table so we can recognize them here and build their argInfo. // // They should not affect the placement of any other args or stack space required. // Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls. bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind); if (isNonStandard) { isRegArg = (nonStdRegNum != REG_STK); } else if (call->IsTailCallViaJitHelper()) { // We have already (before calling fgMorphArgs()) appended the 4 special args // required by the x86 tailcall helper. These args are required to go on the // stack. Force them to the stack here. assert(numArgs >= 4); if (argIndex >= numArgs - 4) { isRegArg = false; } } // Now we know if the argument goes in registers or not and how big it is. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM // If we ever allocate a floating point argument to the stack, then all // subsequent HFA/float/double arguments go on the stack. 
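        // (This enforces the AAPCS rule quoted earlier: once a VFP argument has been
        // allocated to the stack, no later VFP argument may be back-filled into a
        // register, so the remaining float registers are added to the skipped mask
        // below and anyFloatStackArgs blocks any further FP register allocation.)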
if (!isRegArg && passUsingFloatRegs) { for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); } } // If we think we're going to split a struct between integer registers and the stack, check to // see if we've already assigned a floating-point arg to the stack. if (isRegArg && // We decided above to use a register for the argument !passUsingFloatRegs && // We're using integer registers (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack anyFloatStackArgs) // We've already used the stack for a floating-point argument { isRegArg = false; // Change our mind; don't pass this struct partially in registers // Skip the rest of the integer argument registers for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); } } #endif // TARGET_ARM // Now create the fgArgTabEntry. fgArgTabEntry* newArgEntry; if (isRegArg) { regNumber nextRegNum = REG_STK; #if defined(UNIX_AMD64_ABI) regNumber nextOtherRegNum = REG_STK; unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; #endif // defined(UNIX_AMD64_ABI) if (isNonStandard) { nextRegNum = nonStdRegNum; } #if defined(UNIX_AMD64_ABI) else if (isStructArg && structDesc.passedInRegisters) { // It is a struct passed in registers. Assign the next available register. assert((structDesc.eightByteCount <= 2) && "Too many eightbytes."); regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum}; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { *nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs); ++structIntRegs; } else if (structDesc.IsSseSlot(i)) { *nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs); ++structFloatRegs; } } } #endif // defined(UNIX_AMD64_ABI) else { // fill in or update the argInfo table nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum) : genMapIntRegArgNumToRegNum(intArgRegNum); } #ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI assert(size == 1); #endif #endif // This is a register argument - put it in the table newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum) UNIX_AMD64_ABI_ONLY_ARG(structIntRegs) UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs) UNIX_AMD64_ABI_ONLY_ARG(&structDesc)); newArgEntry->SetIsBackFilled(isBackFilled); // Set up the next intArgRegNum and fltArgRegNum values. 
if (!isBackFilled) { #if defined(UNIX_AMD64_ABI) if (isStructArg) { // For this case, we've already set the regNums in the argTabEntry intArgRegNum += structIntRegs; fltArgRegNum += structFloatRegs; } else #endif // defined(UNIX_AMD64_ABI) { if (!isNonStandard) { #if FEATURE_ARG_SPLIT // Check for a split (partially enregistered) struct if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) { // This indicates a partial enregistration of a struct type assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() || (argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG))); unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum; assert((unsigned char)numRegsPartial == numRegsPartial); call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial); } #endif // FEATURE_ARG_SPLIT if (passUsingFloatRegs) { fltArgRegNum += size; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else { // Increment intArgRegNum by 'size' registers intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } } } } else // We have an argument that is not passed in a register { // This is a stack argument - put it in the table newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg); #ifdef UNIX_AMD64_ABI // TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs). if (structDesc.passedInRegisters) { newArgEntry->structDesc.CopyFrom(structDesc); } #endif } newArgEntry->nonStandardArgKind = nonStandardArgKind; if (GlobalJitOptions::compFeatureHfa) { if (isHfaArg) { newArgEntry->SetHfaType(hfaType, hfaSlots); } } newArgEntry->SetMultiRegNums(); noway_assert(newArgEntry != nullptr); if (newArgEntry->isStruct) { newArgEntry->passedByRef = passStructByRef; newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType; } else { newArgEntry->argType = argx->TypeGet(); } DEBUG_ARG_SLOTS_ONLY(argSlots += size;) } // end foreach argument loop #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif } //------------------------------------------------------------------------ // fgMorphArgs: Walk and transform (morph) the arguments of a call // // Arguments: // callNode - the call for which we are doing the argument morphing // // Return Value: // Like most morph methods, this method returns the morphed node, // though in this case there are currently no scenarios where the // node itself is re-created. // // Notes: // This calls fgInitArgInfo to create the 'fgArgInfo' for the call. // If it has already been created, that method will simply return. // // This method changes the state of the call node. It uses the existence // of gtCallLateArgs (the late arguments list) to determine if it has // already done the first round of morphing. // // The first time it is called (i.e. during global morphing), this method // computes the "late arguments". 
This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, perform the in-order
// evaluation of the arguments that might have side-effects, such as embedded
// assignments, calls or possible throws. In these cases, the argument and all
// earlier arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
    GenTreeCall::Use* args;
    GenTree*          argx;

    GenTreeFlags flagsSummary = GTF_EMPTY;

    unsigned argIndex = 0;
    DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)

    bool reMorphing = call->AreArgsComplete();

    // Set up the fgArgInfo.
    fgInitArgInfo(call);
    JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));

    // If we are remorphing, process the late arguments (which were determined by a previous caller).
    if (reMorphing)
    {
        for (GenTreeCall::Use& use : call->LateArgs())
        {
            use.SetNode(fgMorphTree(use.GetNode()));
            flagsSummary |= use.GetNode()->gtFlags;
        }

        assert(call->fgArgInfo != nullptr);
    }
    call->fgArgInfo->RemorphReset();

    // First we morph the argument subtrees ('this' pointer, arguments, etc.).
    // During the first call to fgMorphArgs we also record the
    // information about late arguments in 'fgArgInfo'.
    // This information is used later to construct the gtCallLateArgs list.

    // Process the 'this' argument value, if present.
    if (call->gtCallThisArg != nullptr)
    {
        argx = call->gtCallThisArg->GetNode();
        fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
        argx                        = fgMorphTree(argx);
        call->gtCallThisArg->SetNode(argx);
        // This is a register argument - possibly update it in the table.
        call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
        flagsSummary |= argx->gtFlags;

        if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
        {
            if (!argx->OperIsLocal())
            {
                thisArgEntry->needTmp = true;
                call->fgArgInfo->SetNeedsTemps();
            }
        }
        assert(argIndex == 0);
        argIndex++;
        DEBUG_ARG_SLOTS_ONLY(argSlots++;)
    }

    // Note that this name is a bit of a misnomer - it indicates that there are struct args
    // that occupy more than a single slot that are passed by value (not necessarily in regs).
    bool hasMultiregStructArgs = false;
    for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
    {
        GenTree**      parentArgx = &args->NodeRef();
        fgArgTabEntry* argEntry   = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);

        // Morph the arg node, and update the parent and argEntry pointers.
argx = *parentArgx; argx = fgMorphTree(argx); *parentArgx = argx; assert(argx == args->GetNode()); DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();) CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) { if (argSlots % 2 == 1) { argSlots++; } } } #endif // DEBUG if (argEntry->isNonStandard() && argEntry->isPassedInRegisters()) { // We need to update the node field for this nonStandard arg here // as it may have been changed by the call to fgMorphTree. call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; continue; } DEBUG_ARG_SLOTS_ASSERT(size != 0); DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();) if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // Get information about this argument. var_types hfaType = argEntry->GetHfaType(); bool isHfaArg = (hfaType != TYP_UNDEF); bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters(); unsigned structSize = 0; // Struct arguments may be morphed into a node that is not a struct type. // In such case the fgArgTabEntry keeps track of whether the original node (before morphing) // was a struct and the struct classification. bool isStructArg = argEntry->isStruct; GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/); if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE)) { CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj); unsigned originalSize; if (argObj->TypeGet() == TYP_STRUCT) { if (argObj->OperIs(GT_OBJ)) { // Get the size off the OBJ node. originalSize = argObj->AsObj()->GetLayout()->GetSize(); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } else { // We have a BADCODE assert for this in fgInitArgInfo. assert(argObj->OperIs(GT_LCL_VAR)); originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize; } } else { originalSize = genTypeSize(argx); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE); var_types structBaseType = argEntry->argType; // First, handle the case where the argument is passed by reference. if (argEntry->passedByRef) { DEBUG_ARG_SLOTS_ASSERT(size == 1); copyBlkClass = objClass; #ifdef UNIX_AMD64_ABI assert(!"Structs are not passed by reference on x64/ux"); #endif // UNIX_AMD64_ABI } else // This is passed by value. { // Check to see if we can transform this into load of a primitive type. // 'size' must be the number of pointer sized items DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE); structSize = originalSize; unsigned passingSize = originalSize; // Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size. // When it can do this is platform-dependent: // - In general, it can be done for power of 2 structs that fit in a single register. // - For ARM and ARM64 it must also be a non-HFA struct, or have a single field. // - This is irrelevant for X86, since structs are always passed by value on the stack. 
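                // For example (illustration only): an 8-byte struct such as
                //     struct S { int a; int b; };
                // passed by value on x64 can have its GT_OBJ(addr) turned into a
                // single pointer-sized GT_IND, so the whole argument becomes one
                // 8-byte load instead of a block copy.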
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj); bool canTransform = false; if (structBaseType != TYP_STRUCT) { if (isPow2(passingSize)) { canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType()))); } #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can // only transform in that case if the arg is a local. // TODO-CQ: This transformation should be applicable in general, not just for the ARM64 // or UNIX_AMD64_ABI cases where they will be passed in registers. else { canTransform = (lclVar != nullptr); passingSize = genTypeSize(structBaseType); } #endif // TARGET_ARM64 || UNIX_AMD64_ABI } if (!canTransform) { #if defined(TARGET_AMD64) #ifndef UNIX_AMD64_ABI // On Windows structs are always copied and passed by reference (handled above) unless they are // passed by value in a single register. assert(size == 1); copyBlkClass = objClass; #else // UNIX_AMD64_ABI // On Unix, structs are always passed by value. // We only need a copy if we have one of the following: // - The sizes don't match for a non-lclVar argument. // - We have a known struct type (e.g. SIMD) that requires multiple registers. // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not // actually passed in registers. if (argEntry->isPassedInRegisters()) { if (argObj->OperIs(GT_OBJ)) { if (passingSize != structSize) { copyBlkClass = objClass; } } else if (lclVar == nullptr) { // This should only be the case of a value directly producing a known struct type. assert(argObj->TypeGet() != TYP_STRUCT); if (argEntry->numRegs > 1) { copyBlkClass = objClass; } } } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if ((passingSize != structSize) && (lclVar == nullptr)) { copyBlkClass = objClass; } #endif #ifdef TARGET_ARM // TODO-1stClassStructs: Unify these conditions across targets. if (((lclVar != nullptr) && (lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) || ((argObj->OperIs(GT_OBJ)) && (passingSize != structSize))) { copyBlkClass = objClass; } if (structSize < TARGET_POINTER_SIZE) { copyBlkClass = objClass; } #endif // TARGET_ARM } else { // We have a struct argument that fits into a register, and it is either a power of 2, // or a local. // Change our argument, as needed, into a value of the appropriate type. 
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2))); #else DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) && size == (genTypeSize(structBaseType) / REGSIZE_BYTES))); #endif assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize)); if (argObj->OperIs(GT_OBJ)) { argObj->ChangeOper(GT_IND); // Now see if we can fold *(&X) into X if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR) { GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1; // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE); DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(argObj); // GT_IND argObj = temp; *parentArgx = temp; argx = temp; } } if (argObj->gtOper == GT_LCL_VAR) { unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { if (varDsc->lvFieldCnt == 1) { // get the first and only promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize) { // we will use the first and only promoted field argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart); if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize)) { // Just use the existing field's type argObj->gtType = fieldVarDsc->TypeGet(); } else { // Can't use the existing field's type, so use GT_LCL_FLD to swizzle // to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet())); assert(copyBlkClass == NO_CLASS_HANDLE); } else { // use GT_LCL_FLD to swizzle the single field struct to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // The struct fits into a single register, but it has been promoted into its // constituent fields, and so we have to re-assemble it copyBlkClass = objClass; } } else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType)) { // Not a promoted struct, so just swizzle the type by using GT_LCL_FLD lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // Not a GT_LCL_VAR, so we can just change the type on the node argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet()) || ((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType))); } #if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) // TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in // `genPutStructArgStk` for xarch like we did it for Arm/Arm64. // We still have a struct unless we converted the GT_OBJ into a GT_IND above... if (isHfaArg && passUsingFloatRegs) { } else if (structBaseType == TYP_STRUCT) { // If the valuetype size is not a multiple of TARGET_POINTER_SIZE, // we must copyblk to a temp before doing the obj to avoid // the obj reading memory past the end of the valuetype CLANG_FORMAT_COMMENT_ANCHOR; if (roundupSize > originalSize) { copyBlkClass = objClass; // There are a few special cases where we can omit using a CopyBlk // where we normally would need to use one. 
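                        // In particular, when the source is a local (checked below), the
                        // idea is that its frame allocation is already padded out to a
                        // pointer-sized boundary, so reading the rounded-up size cannot
                        // touch unmapped memory.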
if (argObj->OperIs(GT_OBJ) && argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar? { copyBlkClass = NO_CLASS_HANDLE; } } } #endif // !UNIX_AMD64_ABI } } if (argEntry->isPassedInRegisters()) { call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); } else { call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing); } if (copyBlkClass != NO_CLASS_HANDLE) { fgMakeOutgoingStructArgCopy(call, args, copyBlkClass); } if (argx->gtOper == GT_MKREFANY) { // 'Lower' the MKREFANY tree and insert it. noway_assert(!reMorphing); #ifdef TARGET_X86 // Build the mkrefany as a GT_FIELD_LIST GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF); fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); #else // !TARGET_X86 // Get a new temp // Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument")); lvaSetStruct(tmp, impGetRefAnyClass(), false); // Build the mkrefany as a comma node: // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); destPtrSlot->gtFlags |= GTF_VAR_DEF; destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2); GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot); // Change the expression to "(tmp=val)" args->SetNode(asg); // EvalArgsToTemps will cause tmp to actually get loaded as the argument call->fgArgInfo->EvalToTmp(argEntry, tmp, asg); lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } #if FEATURE_MULTIREG_ARGS if (isStructArg) { if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) || (isHfaArg && argx->TypeGet() == TYP_STRUCT)) { hasMultiregStructArgs = true; } } #ifdef TARGET_ARM else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE)) { assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2)); } #endif else { // We must have exactly one register or slot. assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) || ((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1))); } #endif #if defined(TARGET_X86) if (isStructArg) { GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx); if ((lclNode != nullptr) && (lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // Make a GT_FIELD_LIST of the field lclVars. 
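            // Sketch of the transformation (hypothetical promoted local, for
            // illustration only):
            //     struct Point { int x; int y; };  // V03 promoted into V04 (x) and V05 (y)
            //     LCL_VAR<struct> V03
            // becomes
            //     FIELD_LIST { LCL_VAR<int> V04 @0, LCL_VAR<int> V05 @4 }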
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon(); LclVarDsc* varDsc = lvaGetDesc(lcl); GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); for (unsigned fieldLclNum = varDsc->lvFieldLclStart; fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* fieldLcl; if (fieldLclNum == varDsc->lvFieldLclStart) { lcl->SetLclNum(fieldLclNum); lcl->SetOperResetFlags(GT_LCL_VAR); lcl->gtType = fieldVarDsc->TypeGet(); fieldLcl = lcl; } else { fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); } fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); } } } #endif // TARGET_X86 flagsSummary |= args->GetNode()->gtFlags; } // end foreach argument loop if (!reMorphing) { call->fgArgInfo->ArgsComplete(); } /* Process the function address, if indirect call */ if (call->gtCallType == CT_INDIRECT) { call->gtCallAddr = fgMorphTree(call->gtCallAddr); // Const CSE may create an assignment node here flagsSummary |= call->gtCallAddr->gtFlags; } #if FEATURE_FIXED_OUT_ARGS // Record the outgoing argument size. If the call is a fast tail // call, it will setup its arguments in incoming arg area instead // of the out-going arg area, so we don't need to track the // outgoing arg size. if (!call->IsFastTailCall()) { #if defined(UNIX_AMD64_ABI) // This is currently required for the UNIX ABI to work correctly. opts.compNeedToAlignFrame = true; #endif // UNIX_AMD64_ABI const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset()); #if defined(DEBUG_ARG_SLOTS) unsigned preallocatedArgCount = 0; if (!compMacOsArm64Abi()) { preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); } #endif call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL)); #ifdef DEBUG if (verbose) { const fgArgInfo* argInfo = call->fgArgInfo; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " "outgoingArgSpaceSize=%d\n", argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } else { printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } #else printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); #endif } #endif } #endif // FEATURE_FIXED_OUT_ARGS // Clear the ASG and EXCEPT (if possible) flags on the call node call->gtFlags &= ~GTF_ASG; if (!call->OperMayThrow(this)) { call->gtFlags &= ~GTF_EXCEPT; } // Union in the side effect flags from the call's operands call->gtFlags |= flagsSummary & GTF_ALL_EFFECT; // If we are remorphing or don't have any register arguments or other arguments that need // temps, then we don't need to call SortArgs() and EvalArgsToTemps(). // if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps())) { // Do the 'defer or eval to temp' analysis. 
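        // SortArgs() picks an evaluation order for the arguments, and EvalArgsToTemps()
        // then introduces temps for the arguments that need them and builds the late
        // argument list (gtCallLateArgs) that does the final argument placement.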
call->fgArgInfo->SortArgs(); call->fgArgInfo->EvalArgsToTemps(); } if (hasMultiregStructArgs) { fgMorphMultiregStructArgs(call); } #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif return call; } #ifdef _PREFAST_ #pragma warning(pop) #endif //----------------------------------------------------------------------------- // fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and // call fgMorphMultiregStructArg on each of them. // // Arguments: // call : a GenTreeCall node that has one or more TYP_STRUCT arguments\. // // Notes: // We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types. // It will ensure that the struct arguments are in the correct form. // If this method fails to find any TYP_STRUCT arguments it will assert. // void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call) { bool foundStructArg = false; GenTreeFlags flagsSummary = GTF_EMPTY; #ifdef TARGET_X86 assert(!"Logic error: no MultiregStructArgs for X86"); #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI"); #endif for (GenTreeCall::Use& use : call->Args()) { // For late arguments the arg tree that is overridden is in the gtCallLateArgs list. // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.) // The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping // between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself, // otherwise points to the list in the late args list. bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0; fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode()); assert(fgEntryPtr != nullptr); GenTree* argx = fgEntryPtr->GetNode(); GenTreeCall::Use* lateUse = nullptr; GenTree* lateNode = nullptr; if (isLateArg) { for (GenTreeCall::Use& lateArgUse : call->LateArgs()) { GenTree* argNode = lateArgUse.GetNode(); if (argx == argNode) { lateUse = &lateArgUse; lateNode = argNode; break; } } assert((lateUse != nullptr) && (lateNode != nullptr)); } if (!fgEntryPtr->isStruct) { continue; } unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber()); if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT)) { foundStructArg = true; if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST)) { if (fgEntryPtr->IsHfaRegArg()) { var_types hfaType = fgEntryPtr->GetHfaType(); unsigned structSize; if (argx->OperIs(GT_OBJ)) { structSize = argx->AsObj()->GetLayout()->GetSize(); } else if (varTypeIsSIMD(argx)) { structSize = genTypeSize(argx); } else { assert(argx->OperIs(GT_LCL_VAR)); structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize; } assert(structSize > 0); if (structSize == genTypeSize(hfaType)) { if (argx->OperIs(GT_OBJ)) { argx->SetOper(GT_IND); } argx->gtType = hfaType; } } GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr); // Did we replace 'argx' with a new tree? 
                if (newArgx != argx)
                {
                    // link the new arg node into either the late arg list or the gtCallArgs list
                    if (isLateArg)
                    {
                        lateUse->SetNode(newArgx);
                    }
                    else
                    {
                        use.SetNode(newArgx);
                    }

                    assert(fgEntryPtr->GetNode() == newArgx);
                }
            }
        }
    }

    // We should only call this method when we actually have one or more multireg struct args
    assert(foundStructArg);

    // Update the flags
    call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}

//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list,
//   morph the argument as needed to be passed correctly.
//
// Arguments:
//   arg        - A GenTree node containing a TYP_STRUCT arg
//   fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
// Notes:
//   The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT.
//   If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the
//   stack are marked as doNotEnregister, and then we return.
//
//   If it is passed by register, we mutate the argument into the GT_FIELD_LIST form
//   which is only used for struct arguments.
//
//   If arg is a LclVar we check if it is struct promoted and has the right number of fields
//   and if they are at the appropriate offsets we will use the struct promoted fields
//   in the GT_FIELD_LIST nodes that we create.
//   If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements,
//   we will use a set of GT_LCL_FLD nodes to access the various portions of the struct;
//   this also forces the struct to be stack allocated into the local frame.
//   For the GT_OBJ case we will clone the address expression and generate two (or more)
//   indirections.
//   Currently the implementation handles ARM/ARM64 and Unix AMD64, and hits the NYI
//   assert below for other architectures.
//
GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr)
{
    assert(varTypeIsStruct(arg->TypeGet()));

#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI)
    NYI("fgMorphMultiregStructArg requires implementation for this target");
#endif

#ifdef TARGET_ARM
    if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) ||
        (!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK))
#else
    if (fgEntryPtr->GetRegNum() == REG_STK)
#endif
    {
        GenTreeLclVarCommon* lcl       = nullptr;
        GenTree*             actualArg = arg->gtEffectiveVal();

        if (actualArg->OperGet() == GT_OBJ)
        {
            if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR))
            {
                lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon();
            }
        }
        else if (actualArg->OperGet() == GT_LCL_VAR)
        {
            lcl = actualArg->AsLclVarCommon();
        }
        if (lcl != nullptr)
        {
            if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)
            {
                arg = fgMorphLclArgToFieldlist(lcl);
            }
            else if (arg->TypeGet() == TYP_STRUCT)
            {
                // If this is a non-register struct, it must be referenced from memory.
                if (!actualArg->OperIs(GT_OBJ))
                {
                    // Create an Obj of the temp to use it as a call argument.
                    arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
                    arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg);
                }
                // Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg)); } } return arg; } #if FEATURE_MULTIREG_ARGS // Examine 'arg' and setup argValue objClass and structSize // const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg); GenTree* argValue = arg; // normally argValue will be arg, but see right below unsigned structSize = 0; if (arg->TypeGet() != TYP_STRUCT) { structSize = genTypeSize(arg->TypeGet()); assert(structSize == info.compCompHnd->getClassSize(objClass)); } else if (arg->OperGet() == GT_OBJ) { GenTreeObj* argObj = arg->AsObj(); const ClassLayout* objLayout = argObj->GetLayout(); structSize = objLayout->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); // If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR. GenTree* op1 = argObj->gtOp1; if (op1->OperGet() == GT_ADDR) { GenTree* underlyingTree = op1->AsOp()->gtOp1; // Only update to the same type. if (underlyingTree->OperIs(GT_LCL_VAR)) { const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar()); if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout)) { argValue = underlyingTree; } } } } else if (arg->OperGet() == GT_LCL_VAR) { LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon()); structSize = varDsc->lvExactSize; assert(structSize == info.compCompHnd->getClassSize(objClass)); } else { structSize = info.compCompHnd->getClassSize(objClass); } var_types hfaType = TYP_UNDEF; var_types elemType = TYP_UNDEF; unsigned elemCount = 0; unsigned elemSize = 0; var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0 hfaType = fgEntryPtr->GetHfaType(); if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters()) { elemType = hfaType; elemSize = genTypeSize(elemType); elemCount = structSize / elemSize; assert(elemSize * elemCount == structSize); for (unsigned inx = 0; inx < elemCount; inx++) { type[inx] = elemType; } } else { assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE); BYTE gcPtrs[MAX_ARG_REG_COUNT]; elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]); for (unsigned inx = 0; inx < elemCount; inx++) { #ifdef UNIX_AMD64_ABI if (gcPtrs[inx] == TYPE_GC_NONE) { type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx], fgEntryPtr->structDesc.eightByteSizes[inx]); } else #endif // UNIX_AMD64_ABI { type[inx] = getJitGCType(gcPtrs[inx]); } } #ifndef UNIX_AMD64_ABI if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { elemSize = TARGET_POINTER_SIZE; // We can safely widen this to aligned bytes since we are loading from // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and // lives in the stack frame or will be a promoted field. 
// structSize = elemCount * TARGET_POINTER_SIZE; } else // we must have a GT_OBJ { assert(argValue->OperGet() == GT_OBJ); // We need to load the struct from an arbitrary address // and we can't read past the end of the structSize // We adjust the last load type here // unsigned remainingBytes = structSize % TARGET_POINTER_SIZE; unsigned lastElem = elemCount - 1; if (remainingBytes != 0) { switch (remainingBytes) { case 1: type[lastElem] = TYP_BYTE; break; case 2: type[lastElem] = TYP_SHORT; break; #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) case 4: type[lastElem] = TYP_INT; break; #endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) default: noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg"); break; } } } #endif // !UNIX_AMD64_ABI } // We should still have a TYP_STRUCT assert(varTypeIsStruct(argValue->TypeGet())); GenTreeFieldList* newArg = nullptr; // Are we passing a struct LclVar? // if (argValue->OperGet() == GT_LCL_VAR) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); // At this point any TYP_STRUCT LclVar must be an aligned struct // or an HFA struct, both which are passed by value. // assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa()); varDsc->lvIsMultiRegArg = true; #ifdef DEBUG if (verbose) { JITDUMP("Multireg struct argument V%02u : ", varNum); fgEntryPtr->Dump(); } #endif // DEBUG #ifndef UNIX_AMD64_ABI // This local variable must match the layout of the 'objClass' type exactly if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { // We have a HFA struct. noway_assert(elemType == varDsc->GetHfaType()); noway_assert(elemSize == genTypeSize(elemType)); noway_assert(elemCount == (varDsc->lvExactSize / elemSize)); noway_assert(elemSize * elemCount == varDsc->lvExactSize); for (unsigned inx = 0; (inx < elemCount); inx++) { noway_assert(type[inx] == elemType); } } else { #if defined(TARGET_ARM64) // We must have a 16-byte struct (non-HFA) noway_assert(elemCount == 2); #elif defined(TARGET_ARM) noway_assert(elemCount <= 4); #endif for (unsigned inx = 0; inx < elemCount; inx++) { var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx); // We setup the type[inx] value above using the GC info from 'objClass' // This GT_LCL_VAR must have the same GC layout info // if (varTypeIsGC(currentGcLayoutType)) { noway_assert(type[inx] == currentGcLayoutType); } else { // We may have use a small type when we setup the type[inx] values above // We can safely widen this to TYP_I_IMPL type[inx] = TYP_I_IMPL; } } } if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters()) { bool canMorphToFieldList = true; for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize) { const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset); if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum))) { canMorphToFieldList = false; break; } } if (canMorphToFieldList) { newArg = fgMorphLclArgToFieldlist(varNode); } } else #endif // !UNIX_AMD64_ABI #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // Is this LclVar a promoted struct with exactly 2 fields? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa()) { // See if we have two promoted fields that start at offset 0 and 8? unsigned loVarNum = lvaGetFieldLocal(varDsc, 0); unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE); // Did we find the promoted fields at the necessary offsets? 
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM)) { LclVarDsc* loVarDsc = lvaGetDesc(loVarNum); LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum); var_types loType = loVarDsc->lvType; var_types hiType = hiVarDsc->lvType; if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) || (varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1)))) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // } else { // We can use the struct promoted field as the two arguments // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr)) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType); newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #elif defined(TARGET_ARM) // Is this LclVar a promoted struct with exactly same size? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa()) { // See if we have promoted fields? unsigned varNums[4]; bool hasBadVarNum = false; for (unsigned inx = 0; inx < elemCount; inx++) { varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx); if (varNums[inx] == BAD_VAR_NUM) { hasBadVarNum = true; break; } } // Did we find the promoted fields at the necessary offsets? if (!hasBadVarNum) { LclVarDsc* varDscs[4]; var_types varType[4]; bool varIsFloat = false; for (unsigned inx = 0; inx < elemCount; inx++) { varDscs[inx] = lvaGetDesc(varNums[inx]); varType[inx] = varDscs[inx]->lvType; if (varTypeIsFloating(varType[inx])) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the // integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // varIsFloat = true; break; } } if (!varIsFloat) { newArg = fgMorphLclArgToFieldlist(varNode); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } #endif // TARGET_ARM } // If we didn't set newarg to a new List Node tree // if (newArg == nullptr) { if (fgEntryPtr->GetRegNum() == REG_STK) { // We leave this stack passed argument alone return arg; } // Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted ) // A GT_LCL_FLD could also contain a 16-byte struct or HFA struct inside it? 
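        // If so, the loop below emits one GT_LCL_FLD per register-sized piece, e.g.
        // (illustration only) a 16-byte struct at offset 8 of V02 becomes
        //     FIELD_LIST { LCL_FLD<long> V02 [+8] @0, LCL_FLD<long> V02 [+16] @8 }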
// if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); unsigned baseOffset = varNode->GetLclOffs(); unsigned lastOffset = baseOffset + structSize; // The allocated size of our LocalVar must be at least as big as lastOffset assert(varDsc->lvSize() >= lastOffset); if (varDsc->HasGCPtr()) { // alignment of the baseOffset is required noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0); #ifndef UNIX_AMD64_ABI noway_assert(elemSize == TARGET_POINTER_SIZE); #endif unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; ClassLayout* layout = varDsc->GetLayout(); for (unsigned inx = 0; (inx < elemCount); inx++) { // The GC information must match what we setup using 'objClass' if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx])) { noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx)); } } } else // this varDsc contains no GC pointers { for (unsigned inx = 0; inx < elemCount; inx++) { // The GC information must match what we setup using 'objClass' noway_assert(!varTypeIsGC(type[inx])); } } // // We create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI) // unsigned offset = baseOffset; newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset); newArg->AddField(this, nextLclFld, offset, type[inx]); offset += genTypeSize(type[inx]); } } // Are we passing a GT_OBJ struct? // else if (argValue->OperGet() == GT_OBJ) { GenTreeObj* argObj = argValue->AsObj(); GenTree* baseAddr = argObj->gtOp1; var_types addrType = baseAddr->TypeGet(); if (baseAddr->OperGet() == GT_ADDR) { GenTree* addrTaken = baseAddr->AsOp()->gtOp1; if (addrTaken->IsLocal()) { GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); // We access non-struct type (for example, long) as a struct type. // Make sure lclVar lives on stack to make sure its fields are accessible by address. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } } // Create a new tree for 'arg' // replace the existing LDOBJ(EXPR) // with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); unsigned offset = 0; for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* curAddr = baseAddr; if (offset != 0) { GenTree* baseAddrDup = gtCloneExpr(baseAddr); noway_assert(baseAddrDup != nullptr); curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL)); } else { curAddr = baseAddr; } GenTree* curItem = gtNewIndir(type[inx], curAddr); // For safety all GT_IND should have at least GT_GLOB_REF set. 
curItem->gtFlags |= GTF_GLOB_REF; newArg->AddField(this, curItem, offset, type[inx]); offset += genTypeSize(type[inx]); } } } #ifdef DEBUG // If we reach here we should have set newArg to something if (newArg == nullptr) { gtDispTree(argValue); assert(!"Missing case in fgMorphMultiregStructArg"); } #endif noway_assert(newArg != nullptr); #ifdef DEBUG if (verbose) { printf("fgMorphMultiregStructArg created tree:\n"); gtDispTree(newArg); } #endif arg = newArg; // consider calling fgMorphTree(newArg); #endif // FEATURE_MULTIREG_ARGS return arg; } //------------------------------------------------------------------------ // fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields // // Arguments: // lcl - The GT_LCL_VAR node we will transform // // Return value: // The new GT_FIELD_LIST that we have created. // GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl) { LclVarDsc* varDsc = lvaGetDesc(lcl); assert(varDsc->lvPromoted); unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclNum = varDsc->lvFieldLclStart; GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned i = 0; i < fieldCount; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); fieldLclNum++; } return fieldList; } //------------------------------------------------------------------------ // fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary, // to pass to a callee. // // Arguments: // call - call being processed // args - args for the call // copyBlkClass - class handle for the struct // // The arg is updated if necessary with the copy. // void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass) { GenTree* argx = args->GetNode(); noway_assert(argx->gtOper != GT_MKREFANY); fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx); // If we're optimizing, see if we can avoid making a copy. // // We don't need a copy if this is the last use of an implicit by-ref local. // if (opts.OptimizationEnabled()) { GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { const unsigned varNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(varNum); const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY); // We don't have liveness so we rely on other indications of last use. // // We handle these cases: // // * (must not copy) If the call is a tail call, the use is a last use. // We must skip the copy if we have a fast tail call. // // * (may not copy) if the call is noreturn, the use is a last use. // We also check for just one reference here as we are not doing // alias analysis of the call's parameters, or checking if the call // site is not within some try region. // // * (may not copy) if there is exactly one use of the local in the method, // and the call is not in loop, this is a last use. // // fgMightHaveLoop() is expensive; check it last, only if necessary. 
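// Hypothetical example (not from the original comments): in
//     void M(LargeStruct s) { Callee(s); }   // call in tail position
// 's' is an implicit byref on win-x64/arm64 and this call consumes its only appearance,
// so the byref can be forwarded as-is and the defensive copy elided.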
// if (call->IsTailCall() || // ((totalAppearances == 1) && call->IsNoReturn()) || // ((totalAppearances == 1) && !fgMightHaveLoop())) { args->SetNode(lcl); assert(argEntry->GetNode() == lcl); JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum); return; } } } JITDUMP("making an outgoing copy for struct arg\n"); if (fgOutgoingArgTemps == nullptr) { fgOutgoingArgTemps = hashBv::Create(this); } unsigned tmp = 0; bool found = false; // Attempt to find a local we have already used for an outgoing struct and reuse it. // We do not reuse within a statement. if (!opts.MinOpts()) { indexType lclNum; FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps) { LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) && !fgCurrentlyInUseArgTemps->testBit(lclNum)) { tmp = (unsigned)lclNum; found = true; JITDUMP("reusing outgoing struct arg"); break; } } NEXT_HBV_BIT_SET; } // Create the CopyBlk tree and insert it. if (!found) { // Get a new temp // Here we don't need the unsafe value class check, since the addr of this temp is used only in copyblk. tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument")); lvaSetStruct(tmp, copyBlkClass, false); if (call->IsVarargs()) { lvaSetStructUsedAsVarArg(tmp); } fgOutgoingArgTemps->setBit(tmp); } fgCurrentlyInUseArgTemps->setBit(tmp); // TYP_SIMD structs should not be enregistered, since ABI requires it to be // allocated on stack and address of it needs to be passed. if (lclVarIsSIMDType(tmp)) { // TODO: check if we need this block here or other parts already deal with it. lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg)); } // Create a reference to the temp GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType); dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction. // Copy the valuetype to the temp GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */); copyBlk = fgMorphCopyBlock(copyBlk); #if FEATURE_FIXED_OUT_ARGS // Do the copy early, and evaluate the temp later (see EvalArgsToTemps) // When on Unix create LCL_FLD for structs passed in more than one register. See fgMakeTmpArgNode GenTree* arg = copyBlk; #else // FEATURE_FIXED_OUT_ARGS // Structs are always on the stack, and thus never need temps // so we have to put the copy and temp all into one expression. argEntry->tmpNum = tmp; GenTree* arg = fgMakeTmpArgNode(argEntry); // Change the expression to "(tmp=val),tmp" arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg); #endif // FEATURE_FIXED_OUT_ARGS args->SetNode(arg); call->fgArgInfo->EvalToTmp(argEntry, tmp, arg); } #ifdef TARGET_ARM // See declaration for specification comment. void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask) { assert(varDsc->lvPromoted); // There's no way to do these calculations without breaking abstraction and assuming that // integer register arguments are consecutive ints. They are on ARM. // To start, figure out what register contains the last byte of the first argument. LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); unsigned lastFldRegOfLastByte = (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; // Now we're keeping track of the register that the last field ended in; see what registers // subsequent fields start in, and whether any are skipped.
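// Illustrative example (hypothetical layout, 32-bit ARM): for
//     struct S { int a; double d; }
// with 'a' passed in r0, the 8-byte-aligned 'd' starts at offset 8 (regs r2:r3),
// so r1 is a skipped register that the loop below adds to *pArgSkippedRegMask.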
// (We assume here the invariant that the fields are sorted in offset order.) for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++) { unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset; LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum); unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE; assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields. // This loop should enumerate the offsets of any registers skipped: find what reg contains the // last byte of the previous field, start at the first register after that, and if that isn't // the first reg of the current field, record every register in between as skipped. for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset; skippedRegOffsets++) { // If the register number would not be an arg reg, we're done. if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG) return; *pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets)); } lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; } } #endif // TARGET_ARM /***************************************************************************** * * A little helper used to rearrange nested commutative operations. The * effect is that nested associative, commutative operations are transformed * into a 'left-deep' tree, i.e. into something like this: * * (((a op b) op c) op d) op... */ #if REARRANGE_ADDS void Compiler::fgMoveOpsLeft(GenTree* tree) { GenTree* op1; GenTree* op2; genTreeOps oper; do { op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; oper = tree->OperGet(); noway_assert(GenTree::OperIsCommutative(oper)); noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL); noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder); noway_assert(oper == op2->gtOper); // Commutativity doesn't hold if overflow checks are needed if (tree->gtOverflowEx() || op2->gtOverflowEx()) { return; } if (gtIsActiveCSE_Candidate(op2)) { // If we have marked op2 as a CSE candidate, // we can't perform a commutative reordering // because any value numbers that we computed for op2 // will be incorrect after performing a commutative reordering // return; } if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT)) { return; } // Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)) { return; } if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN) { // We could deal with this, but we were always broken and just hit the assert // below regarding flags, which means it's not frequent, so will just bail out. // See #195514 return; } noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx()); GenTree* ad1 = op2->AsOp()->gtOp1; GenTree* ad2 = op2->AsOp()->gtOp2; // Compiler::optOptimizeBools() can create GT_OR of two GC pointers yielding a GT_INT // We cannot reorder such GT_OR trees // if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet())) { break; } // Don't split up a byref calculation and create a new byref. E.g., // [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int). // Doing this transformation could create a situation where the first // addition (that is, [byref]+ (ref, int) ) creates a byref pointer that // no longer points within the ref object. If a GC happens, the byref won't // get updated. This can happen, for instance, if one of the int components // is negative.
// It also requires the address generation to be in a fully-interruptible // code region. // if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL) { assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD)); break; } /* Change "(x op (y op z))" to "(x op y) op z" */ /* i.e. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */ GenTree* new_op1 = op2; new_op1->AsOp()->gtOp1 = op1; new_op1->AsOp()->gtOp2 = ad1; /* Change the flags. */ // Make sure we aren't throwing away any flags noway_assert((new_op1->gtFlags & ~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag. GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0); new_op1->gtFlags = (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag. (op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT); /* Retype new_op1 if it has now become (or is no longer) a GC ptr. */ if (varTypeIsGC(op1->TypeGet())) { noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_ADD) || // byref(ref + (int+int)) (varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_OR)); // int(gcref | int(gcref|intval)) new_op1->gtType = tree->gtType; } else if (varTypeIsGC(ad2->TypeGet())) { // Neither ad1 nor op1 are GC, so new_op1 isn't either noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL); new_op1->gtType = TYP_I_IMPL; } // If new_op1 is a new expression, assign it a new unique value number. // vnStore is null before the ValueNumber phase has run if (vnStore != nullptr) { // We can only keep the old value number on new_op1 if both op1 and ad2 // have the same non-NoVN value numbers. Since op is commutative, comparing // only ad2 and op1 is enough. if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal())) { new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet())); } } tree->AsOp()->gtOp1 = new_op1; tree->AsOp()->gtOp2 = ad2; /* If 'new_op1' is now the same nested op, process it recursively */ if ((ad1->gtOper == oper) && !ad1->gtOverflowEx()) { fgMoveOpsLeft(new_op1); } /* If 'ad2' is now the same nested op, process it * Instead of recursion, we set up op1 and op2 for the next loop.
*/ op1 = new_op1; op2 = ad2; } while ((op2->gtOper == oper) && !op2->gtOverflowEx()); return; } #endif /*****************************************************************************/ void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay) { if (tree->OperIs(GT_BOUNDS_CHECK)) { GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk(); BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay); if (failBlock != nullptr) { boundsChk->gtIndRngFailBB = failBlock; } } else if (tree->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr(); BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); if (failBlock != nullptr) { indexAddr->gtIndRngFailBB = failBlock; } } else { noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX)); fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); } } BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay) { if (opts.MinOpts()) { delay = false; } if (!opts.compDbgCode) { if (!delay && !compIsForInlining()) { // Create/find the appropriate "range-fail" label return fgRngChkTarget(compCurBB, kind); } } return nullptr; } /***************************************************************************** * * Expand a GT_INDEX node and fully morph the child operands * * The orginal GT_INDEX node is bashed into the GT_IND node that accesses * the array element. We expand the GT_INDEX node into a larger tree that * evaluates the array base and index. The simplest expansion is a GT_COMMA * with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag. * For complex array or index expressions one or more GT_COMMA assignments * are inserted so that we only evaluate the array or index expressions once. * * The fully expanded tree is then morphed. This causes gtFoldExpr to * perform local constant prop and reorder the constants in the tree and * fold them. * * We then parse the resulting array element expression in order to locate * and label the constants and variables that occur in the tree. */ const int MAX_ARR_COMPLEXITY = 4; const int MAX_INDEX_COMPLEXITY = 4; GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) { noway_assert(tree->gtOper == GT_INDEX); GenTreeIndex* asIndex = tree->AsIndex(); var_types elemTyp = asIndex->TypeGet(); unsigned elemSize = asIndex->gtIndElemSize; CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass; noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr); // Fold "cns_str"[cns_index] to ushort constant // NOTE: don't do it for empty string, the operation will fail anyway if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) && !asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32()) { const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue()); if (cnsIndex >= 0) { int length; const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd, asIndex->Arr()->AsStrCon()->gtSconCPX, &length); if ((cnsIndex < length) && (str != nullptr)) { GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT); INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return cnsCharNode; } } } #ifdef FEATURE_SIMD if (supportSIMDTypes() && varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize)) { // If this is a SIMD type, this is the point at which we lose the type information, // so we need to set the correct type on the GT_IND. // (We don't care about the base type here, so we only check, but don't retain, the return value). 
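// Sketch, assuming a 16-byte SIMD-compatible element type (example only): for such an
// array element the getBaseJitTypeAndSizeOfSIMDType query below succeeds, and the node
// is retyped from TYP_STRUCT to TYP_SIMD16 via getSIMDTypeForSize(16).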
unsigned simdElemSize = 0; if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF) { assert(simdElemSize == elemSize); elemTyp = getSIMDTypeForSize(elemSize); // This is the new type of the node. tree->gtType = elemTyp; // Now set elemStructType to null so that we don't confuse value numbering. elemStructType = nullptr; } } #endif // FEATURE_SIMD // Set up the array length's offset into lenOffs // And the first element's offset into elemOffs ssize_t lenOffs; ssize_t elemOffs; if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { lenOffs = OFFSETOF__CORINFO_String__stringLen; elemOffs = OFFSETOF__CORINFO_String__chars; tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE } else { // We have a standard array lenOffs = OFFSETOF__CORINFO_Array__length; elemOffs = OFFSETOF__CORINFO_Array__data; } // In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts // compilation time is roughly proportional to the size of the IR, this helps keep compilation times down. // Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion // performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in // minopts). // // When we *are* optimizing, we fully expand GT_INDEX to: // 1. Evaluate the array address expression and store the result in a temp if the expression is complex or // side-effecting. // 2. Evaluate the array index expression and store the result in a temp if the expression is complex or // side-effecting. // 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array)) // 4. Compute the address of the element that will be accessed: // GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize)) // 5. Dereference the address with a GT_IND. // // This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows // for more straightforward bounds-check removal, CSE, etc. if (opts.MinOpts()) { GenTree* const array = fgMorphTree(asIndex->Arr()); GenTree* const index = fgMorphTree(asIndex->Index()); GenTreeIndexAddr* const indexAddr = new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize, static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs)); indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT; // Mark the indirection node as needing a range check if necessary. 
// Note this will always be true unless JitSkipArrayBoundCheck() is used if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0) { fgSetRngChkTarget(indexAddr); } if (!tree->TypeIs(TYP_STRUCT)) { tree->ChangeOper(GT_IND); } else { DEBUG_DESTROY_NODE(tree); tree = gtNewObjNode(elemStructType, indexAddr); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); } GenTreeIndir* const indir = tree->AsIndir(); indir->Addr() = indexAddr; bool canCSE = indir->CanCSE(); indir->gtFlags = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT); if (!canCSE) { indir->SetDoNotCSE(); } INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return indir; } GenTree* arrRef = asIndex->Arr(); GenTree* index = asIndex->Index(); bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0); GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression GenTree* bndsChk = nullptr; // If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address. if (chkd) { GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression GenTree* index2 = nullptr; // If the arrRef or index expressions involves an assignment, a call or reads from global memory, // then we *must* allocate a temporary in which to "localize" those values, to ensure that the // same values are used in the bounds check and the actual dereference. // Also we allocate the temporary when the expresion is sufficiently complex/expensive. // // Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is // not exposed. Without that condition there are cases of local struct fields that were previously, // needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that // were mostly ameliorated by adding this condition. // // Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created // after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is // perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to // do this here. if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr")); arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef); arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); } else { arrRef2 = gtCloneExpr(arrRef); noway_assert(arrRef2 != nullptr); } if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) || index->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr")); indexDefn = gtNewTempAssign(indexTmpNum, index); index = gtNewLclvNode(indexTmpNum, index->TypeGet()); index2 = gtNewLclvNode(indexTmpNum, index->TypeGet()); } else { index2 = gtCloneExpr(index); noway_assert(index2 != nullptr); } // Next introduce a GT_BOUNDS_CHECK node var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check. 
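// For reference, the optimized expansion built here for 'a[i]' has roughly this shape
// (illustrative only; temps elided):
//     COMMA(BOUNDS_CHECK(i, ARR_LENGTH(a)),
//           IND(ADD(a, ADD(MUL(i, elemSize), elemOffs))))
// with temps introduced above only when 'a' or 'i' is complex or side-effecting.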
#ifdef TARGET_64BIT // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case, // the comparison will have to be widen to 64 bits. if (index->TypeGet() == TYP_I_IMPL) { bndsChkType = TYP_I_IMPL; } #endif // TARGET_64BIT GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB); if (bndsChkType != TYP_INT) { arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType); } GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL); bndsChk = arrBndsChk; // Now we'll switch to using the second copies for arrRef and index // to compute the address expression arrRef = arrRef2; index = index2; } // Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))" GenTree* addr; #ifdef TARGET_64BIT // Widen 'index' on 64-bit targets if (index->TypeGet() != TYP_I_IMPL) { if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } } #endif // TARGET_64BIT /* Scale the index value if necessary */ if (elemSize > 1) { GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL); // Fix 392756 WP7 Crossgen // // During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node // is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar. // Hence to prevent the constant from becoming a CSE we mark it as NO_CSE. // size->gtFlags |= GTF_DONT_CSE; /* Multiply by the array element size */ addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size); } else { addr = index; } // Be careful to only create the byref pointer when the full index expression is added to the array reference. // We don't want to create a partial byref address expression that doesn't include the full index offset: // a byref must point within the containing object. It is dangerous (especially when optimizations come into // play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that // the partial byref will not point within the object, and thus not get updated correctly during a GC. // This is mostly a risk in fully-interruptible code regions. // We can generate two types of trees for "addr": // // 1) "arrRef + (index + elemOffset)" // 2) "(arrRef + elemOffset) + index" // // XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1), // while for Arm we better try to make an invariant sub-tree as large as possible, which is usually // "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen. // 2) should still be safe from GC's point of view since both ADD operations are byref and point to // within the object so GC will be able to correctly track and update them. bool groupArrayRefWithElemOffset = false; #ifdef TARGET_ARMARCH groupArrayRefWithElemOffset = true; // TODO: in some cases even on ARM we better use 1) shape because if "index" is invariant and "arrRef" is not // we at least will be able to hoist/CSE "index + elemOffset" in some cases. 
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497 // Use 2) form only for primitive types for now - it significantly reduced number of size regressions if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp)) { groupArrayRefWithElemOffset = false; } #endif // First element's offset GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL); if (groupArrayRefWithElemOffset) { GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr); } else { addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr); } assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) || (GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL)); // Change the orginal GT_INDEX node into a GT_IND node tree->SetOper(GT_IND); // If the index node is a floating-point type, notify the compiler // we'll potentially use floating point registers at the time of codegen. if (varTypeUsesFloatReg(tree->gtType)) { this->compFloatingPointUsed = true; } // We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node // is no longer a GT_INDEX node. tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT); tree->AsOp()->gtOp1 = addr; // This is an array index expression. tree->gtFlags |= GTF_IND_ARR_INDEX; // If there's a bounds check, the indir won't fault. if (bndsChk || indexNonFaulting) { tree->gtFlags |= GTF_IND_NONFAULTING; } else { tree->gtFlags |= GTF_EXCEPT; } if (nCSE) { tree->gtFlags |= GTF_DONT_CSE; } // Store information about it. GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType)); // Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it. GenTree* indTree = tree; // Did we create a bndsChk tree? if (bndsChk) { // Use a GT_COMMA node to prepend the array bound check // tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree); /* Mark the indirection node as needing a range check */ fgSetRngChkTarget(bndsChk); } if (indexDefn != nullptr) { // Use a GT_COMMA node to prepend the index assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree); } if (arrRefDefn != nullptr) { // Use a GT_COMMA node to prepend the arRef assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree); } JITDUMP("fgMorphArrayIndex (before remorph):\n") DISPTREE(tree) // Currently we morph the tree to perform some folding operations prior // to attaching fieldSeq info and labeling constant array index contributions // tree = fgMorphTree(tree); JITDUMP("fgMorphArrayIndex (after remorph):\n") DISPTREE(tree) // Ideally we just want to proceed to attaching fieldSeq info and labeling the // constant array index contributions, but the morphing operation may have changed // the 'tree' into something that now unconditionally throws an exception. // // In such case the gtEffectiveVal could be a new tree or it's gtOper could be modified // or it could be left unchanged. If it is unchanged then we should not return, // instead we should proceed to attaching fieldSeq info, etc... // GenTree* arrElem = tree->gtEffectiveVal(); if (fgIsCommaThrow(tree)) { if ((arrElem != indTree) || // A new tree node may have been created (!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT { return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc.. 
} } assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED)); DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED) addr = arrElem->gtGetOp1(); GenTree* cnsOff = nullptr; if (addr->OperIs(GT_ADD)) { GenTree* addrOp1 = addr->gtGetOp1(); if (groupArrayRefWithElemOffset) { if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI()) { assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF)); cnsOff = addrOp1->gtGetOp2(); addr = addr->gtGetOp2(); // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } else { assert(addr->gtGetOp2()->IsCnsIntOrI()); cnsOff = addr->gtGetOp2(); addr = nullptr; } } else { assert(addr->TypeIs(TYP_BYREF)); assert(addr->gtGetOp1()->TypeIs(TYP_REF)); addr = addr->gtGetOp2(); // Look for the constant [#FirstElem] node here, or as the RHS of an ADD. if (addr->IsCnsIntOrI()) { cnsOff = addr; addr = nullptr; } else { if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI()) { cnsOff = addr->gtGetOp2(); addr = addr->gtGetOp1(); } // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } } } else if (addr->IsCnsIntOrI()) { cnsOff = addr; } FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField); if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs)) { // Assign it the [#FirstElem] field sequence // cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq; } else // We have folded the first element's offset with the index expression { // Build the [#ConstantIndex, #FirstElem] field sequence // FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq); if (cnsOff == nullptr) // It must have folded into a zero offset { // Record in the general zero-offset map. fgAddFieldSeqForZeroOffset(addr, fieldSeq); } else { cnsOff->AsIntCon()->gtFieldSeq = fieldSeq; } } return tree; } #ifdef TARGET_X86 /***************************************************************************** * * Wrap fixed stack arguments for varargs functions to go through varargs * cookie to access them, except for the cookie itself. * * Non-x86 platforms are allowed to access all arguments directly * so we don't need this code. 
* */ GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs) { /* For the fixed stack arguments of a varargs function, we need to go through the varargs cookies to access them, except for the cookie itself */ LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg) { // Create a node representing the local pointing to the base of the args GenTree* ptrArg = gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL), gtNewIconNode(varDsc->GetStackOffset() - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs)); // Access the argument through the local GenTree* tree; if (varTypeIsStruct(varType)) { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != nullptr); tree = gtNewObjNode(typeHnd, ptrArg); } else { tree = gtNewOperNode(GT_IND, varType, ptrArg); } tree->gtFlags |= GTF_IND_TGTANYWHERE; if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } return fgMorphTree(tree); } return NULL; } #endif /***************************************************************************** * * Transform the given GT_LCL_VAR tree for code generation. */ GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph) { assert(tree->gtOper == GT_LCL_VAR); unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); var_types varType = lvaGetRealType(lclNum); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 /* If not during the global morphing phase bail */ if (!fgGlobalMorph && !forceRemorph) { return tree; } bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0; noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr if (!varAddr && varDsc->lvNormalizeOnLoad()) { // TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL. // Now it does, but this leads to some regressions because we lose the uniform VNs for trees // that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created // here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)). // This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer. // This quirk preserves the previous behavior. // TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk. bool isBoolQuirk = varType == TYP_BOOL; // Assertion prop can tell us to omit adding a cast here. This is // useful when the local is a small-typed parameter that is passed in a // register: in that case, the ABI specifies that the upper bits might // be invalid, but the assertion guarantees us that we have normalized // when we wrote it. if (optLocalAssertionProp && !isBoolQuirk && optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX) { // The previous assertion can guarantee us that if this node gets // assigned a register, it will be normalized already. It is still // possible that this node ends up being in memory, in which case // normalization will still be needed, so we better have the right // type. 
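// Worked example (hypothetical local number): a normalize-on-load TYP_SHORT local V07
// read as LCL_VAR is rewritten below into CAST(short <- LCL_VAR int V07), unless local
// assertion propagation proves the value is already normalized (the early-out just below).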
assert(tree->TypeGet() == varDsc->TypeGet()); return tree; } // Small-typed arguments and aliased locals are normalized on load. // Other small-typed locals are normalized on store. // Also, under the debugger as the debugger could write to the variable. // If this is one of the former, insert a narrowing cast on the load. // ie. Convert: var-short --> cast-short(var-int) tree->gtType = TYP_INT; fgMorphTreeDone(tree); tree = gtNewCastNode(TYP_INT, tree, false, varType); fgMorphTreeDone(tree); return tree; } return tree; } /***************************************************************************** Grab a temp for big offset morphing. This method will grab a new temp if no temp of this "type" has been created. Or it will return the same cached one if it has been created. */ unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type) { unsigned lclNum = fgBigOffsetMorphingTemps[type]; if (lclNum == BAD_VAR_NUM) { // We haven't created a temp for this kind of type. Create one now. lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing")); fgBigOffsetMorphingTemps[type] = lclNum; } else { // We better get the right type. noway_assert(lvaTable[lclNum].TypeGet() == type); } noway_assert(lclNum != BAD_VAR_NUM); return lclNum; } /***************************************************************************** * * Transform the given GT_FIELD tree for code generation. */ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) { assert(tree->gtOper == GT_FIELD); CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; unsigned fldOffset = tree->AsField()->gtFldOffset; GenTree* objRef = tree->AsField()->GetFldObj(); bool fieldMayOverlap = false; bool objIsLocal = false; if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR)) { // Make sure we've checked if 'objRef' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs may change it do a different opcode, which the // simd field rewrites are sensitive to. fgMorphImplicitByRefArgs(objRef); } noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) || ((tree->gtFlags & GTF_GLOB_REF) != 0)); if (tree->AsField()->gtFldMayOverlap) { fieldMayOverlap = true; // Reset the flag because we may reuse the node. tree->AsField()->gtFldMayOverlap = false; } #ifdef FEATURE_SIMD // if this field belongs to simd struct, translate it to simd intrinsic. if (mac == nullptr) { if (IsBaselineSimdIsaSupported()) { GenTree* newTree = fgMorphFieldToSimdGetElement(tree); if (newTree != tree) { newTree = fgMorphTree(newTree); return newTree; } } } else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1())) { GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr(); if (lcl != nullptr) { lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); } } #endif // Create a default MorphAddrContext early so it doesn't go out of scope // before it is used. MorphAddrContext defMAC(MACK_Ind); /* Is this an instance data member? 
*/ if (objRef) { GenTree* addr; objIsLocal = objRef->IsLocal(); if (tree->gtFlags & GTF_IND_TLS_REF) { NO_WAY("instance field can not be a TLS ref."); } /* We'll create the expression "*(objRef + mem_offs)" */ noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL); /* Now we have a tree like this: +--------------------+ | GT_FIELD | tree +----------+---------+ | +--------------+-------------+ |tree->AsField()->GetFldObj()| +--------------+-------------+ We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +---------+----------+ | | +---------+----------+ | GT_ADD | addr +---------+----------+ | / \ / \ / \ +-------------------+ +----------------------+ | objRef | | fldOffset | | | | (when fldOffset !=0) | +-------------------+ +----------------------+ or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +----------+---------+ | +----------+---------+ | GT_COMMA | comma2 +----------+---------+ | / \ / \ / \ / \ +---------+----------+ +---------+----------+ comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr +---------+----------+ +---------+----------+ | | / \ / \ / \ / \ / \ / \ +-----+-----+ +-----+-----+ +---------+ +-----------+ asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset | +-----+-----+ +-----+-----+ +---------+ +-----------+ | | / \ | / \ | / \ | +-----+-----+ +-----+-----+ +-----------+ | tmpLcl | | objRef | | tmpLcl | +-----------+ +-----------+ +-----------+ */ var_types objRefType = objRef->TypeGet(); GenTree* comma = nullptr; // NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field, // and thus is equivalent to a MACK_Ind with zero offset. if (mac == nullptr) { mac = &defMAC; } // This flag is set to enable the "conservative" style of explicit null-check insertion. // This means that we insert an explicit null check whenever we create byref by adding a // constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately // dereferenced). The alternative is "aggressive", which would not insert such checks (for // small offsets); in this plan, we would transfer some null-checking responsibility to // callee's of methods taking byref parameters. They would have to add explicit null checks // when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in // contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too // large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null // checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs. // This is left here to point out how to implement it. CLANG_FORMAT_COMMENT_ANCHOR; #define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1 bool addExplicitNullCheck = false; // Implicit byref locals and string literals are never null. if (fgAddrCouldBeNull(objRef)) { // If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression // whose address is being taken is either a local or static variable, whose address is necessarily // non-null, or else it is a field dereference, which will do its own bounds checking if necessary. 
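// Minimal sketch of the explicit null check built below (names invented): the object
// reference is evaluated once into a temp, checked, and then reused for addressing:
//     COMMA(COMMA(ASG(tmp, objRef), NULLCHECK(tmp)), ADD(tmp, fldOffset))
// matching the "comma2" diagram above.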
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind)) { if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset)) { addExplicitNullCheck = true; } else { // In R2R mode the field offset for some fields may change when the code // is loaded. So we can't rely on a zero offset here to suppress the null check. // // See GitHub issue #16454. bool fieldHasChangeableOffset = false; #ifdef FEATURE_READYTORUN fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr); #endif #if CONSERVATIVE_NULL_CHECK_BYREF_CREATION addExplicitNullCheck = (mac->m_kind == MACK_Addr) && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset); #else addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset)); #endif } } } if (addExplicitNullCheck) { #ifdef DEBUG if (verbose) { printf("Before explicit null check morphing:\n"); gtDispTree(tree); } #endif // // Create the "comma" subtree // GenTree* asg = nullptr; GenTree* nullchk; unsigned lclNum; if (objRef->gtOper != GT_LCL_VAR) { lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet())); // Create the "asg" node asg = gtNewTempAssign(lclNum, objRef); } else { lclNum = objRef->AsLclVarCommon()->GetLclNum(); } GenTree* lclVar = gtNewLclvNode(lclNum, objRefType); nullchk = gtNewNullCheck(lclVar, compCurBB); nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections if (asg) { // Create the "comma" node. comma = gtNewOperNode(GT_COMMA, TYP_VOID, // We don't want to return anything from this "comma" node. // Set the type to TYP_VOID, so we can select "cmp" instruction // instead of "mov" instruction later on. asg, nullchk); } else { comma = nullchk; } addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node. } else { addr = objRef; } #ifdef FEATURE_READYTORUN if (tree->AsField()->gtFieldLookup.addr != nullptr) { GenTree* offsetNode = nullptr; if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE) { offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr, GTF_ICON_CONST_PTR, true); #ifdef DEBUG offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd; #endif } else { noway_assert(!"unexpected accessType for R2R field access"); } var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF; addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode); } #endif if (fldOffset != 0) { // Generate the "addr" node. /* Add the member offset to the object's address */ FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); addr = gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr, gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq)); } // Now let's set the "tree" as a GT_IND tree. tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; tree->SetIndirExceptionFlags(this); if (addExplicitNullCheck) { // // Create "comma2" node and link it to "tree". // GenTree* comma2; comma2 = gtNewOperNode(GT_COMMA, addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node. 
comma, addr); tree->AsOp()->gtOp1 = comma2; } #ifdef DEBUG if (verbose) { if (addExplicitNullCheck) { printf("After adding explicit null check:\n"); gtDispTree(tree); } } #endif } else /* This is a static data member */ { if (tree->gtFlags & GTF_IND_TLS_REF) { // Thread Local Storage static field reference // // Field ref is a TLS 'Thread-Local-Storage' reference // // Build this tree: IND(*) # // | // ADD(I_IMPL) // / \. // / CNS(fldOffset) // / // / // / // IND(I_IMPL) == [Base of this DLL's TLS] // | // ADD(I_IMPL) // / \. // / CNS(IdValue*4) or MUL // / / \. // IND(I_IMPL) / CNS(4) // | / // CNS(TLS_HDL,0x2C) IND // | // CNS(pIdAddr) // // # Denotes the orginal node // void** pIdAddr = nullptr; unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr); // // If we can we access the TLS DLL index ID value directly // then pIdAddr will be NULL and // IdValue will be the actual TLS DLL index ID // GenTree* dllRef = nullptr; if (pIdAddr == nullptr) { if (IdValue != 0) { dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL); } } else { dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true); // Next we multiply by 4 dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL)); } #define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides // Mark this ICON as a TLS_HDL, codegen will use FS:[cns] GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0) { tree->gtFlags &= ~GTF_FLD_INITCLASS; tlsRef->gtFlags |= GTF_ICON_INITCLASS; } tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (dllRef != nullptr) { /* Add the dllRef */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef); } /* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */ tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (fldOffset != 0) { FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq); /* Add the TLS static field offset to the address */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode); } // Final indirect to get to actual value of TLS static field tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = tlsRef; noway_assert(tree->gtFlags & GTF_IND_TLS_REF); } else { assert(!fieldMayOverlap); // Normal static field reference // // If we can we access the static's address directly // then pFldAddr will be NULL and // fldAddr will be the actual address of the static field // void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr); // We should always be able to access this static field address directly // assert(pFldAddr == nullptr); // For boxed statics, this direct address will be for the box. We have already added // the indirection for the field itself and attached the sequence, in importation. bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd); FieldSeqNode* fldSeq = !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(symHnd) : FieldSeqStore::NotAField(); // TODO-CQ: enable this optimization for 32 bit targets. 
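// Illustrative example (not from the original comments): for
//     static readonly string s_greeting = "hi";
// once the containing class is initialized, getStaticFieldCurrentClass reports a
// non-speculative class, so the load can be marked invariant and non-null below,
// making it a candidate for CSE and hoisting.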
bool isStaticReadOnlyInited = false; #ifdef TARGET_64BIT if (tree->TypeIs(TYP_REF) && !isBoxedStatic) { bool pIsSpeculative = true; if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE) { isStaticReadOnlyInited = !pIsSpeculative; } } #endif // TARGET_64BIT // TODO: choices made below have mostly historical reasons and // should be unified to always use the IND(<address>) form. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT bool preferIndir = isBoxedStatic || isStaticReadOnlyInited || (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr)); #else // !TARGET_64BIT bool preferIndir = isBoxedStatic; #endif // !TARGET_64BIT if (preferIndir) { GenTreeFlags handleKind = GTF_EMPTY; if (isBoxedStatic) { handleKind = GTF_ICON_STATIC_BOX_PTR; } else if (isStaticReadOnlyInited) { handleKind = GTF_ICON_CONST_PTR; } else { handleKind = GTF_ICON_STATIC_HDL; } GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fldSeq); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to. if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited) { tree->gtFlags &= ~GTF_FLD_INITCLASS; addr->gtFlags |= GTF_ICON_INITCLASS; } tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; if (isBoxedStatic) { // The box for the static cannot be null, and is logically invariant, since it // represents (a base for) the static's address. tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } else if (isStaticReadOnlyInited) { JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd)); // Static readonly field is not null at this point (see getStaticFieldCurrentClass impl). tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } return fgMorphSmpOp(tree); } else { // Only volatile or classinit could be set, and they map over noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0); static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE); static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS); tree->SetOper(GT_CLS_VAR); tree->AsClsVar()->gtClsVarHnd = symHnd; tree->AsClsVar()->gtFieldSeq = fldSeq; } return tree; } } noway_assert(tree->gtOper == GT_IND); if (fldOffset == 0) { GenTree* addr = tree->AsOp()->gtOp1; // 'addr' may be a GT_COMMA. Skip over any comma nodes addr = addr->gtEffectiveVal(); #ifdef DEBUG if (verbose) { printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n"); gtDispTree(tree); } #endif // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node. FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); fgAddFieldSeqForZeroOffset(addr, fieldSeq); } // Pass down the current mac; if non null we are computing an address GenTree* result = fgMorphSmpOp(tree, mac); #ifdef DEBUG if (verbose) { printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n"); gtDispTree(result); } #endif return result; } //------------------------------------------------------------------------------ // fgMorphCallInline: attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // inlineResult - result tracking and reporting // // Notes: // Attempts to inline the call. 
// // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult) { bool inliningFailed = false; // Is this call an inline candidate? if (call->IsInlineCandidate()) { InlineContext* createdContext = nullptr; // Attempt the inline fgMorphCallInlineHelper(call, inlineResult, &createdContext); // We should have made up our minds one way or another.... assert(inlineResult->IsDecided()); // If we failed to inline, we have a bit of work to do to cleanup if (inlineResult->IsFailure()) { if (createdContext != nullptr) { // We created a context before we got to the failure, so mark // it as failed in the tree. createdContext->SetFailed(inlineResult); } else { #ifdef DEBUG // In debug we always put all inline attempts into the inline tree. InlineContext* ctx = m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call); ctx->SetFailed(inlineResult); #endif } inliningFailed = true; // Clear the Inline Candidate flag so we can ensure later we tried // inlining all candidates. // call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE; } } else { // This wasn't an inline candidate. So it must be a GDV candidate. assert(call->IsGuardedDevirtualizationCandidate()); // We already know we can't inline this call, so don't even bother to try. inliningFailed = true; } // If we failed to inline (or didn't even try), do some cleanup. if (inliningFailed) { if (call->gtReturnType != TYP_VOID) { JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID()); // Detach the GT_CALL tree from the original statement by // hanging a "nothing" node to it. Later the "nothing" node will be removed // and the original GT_CALL tree will be picked up by the GT_RET_EXPR node. noway_assert(fgMorphStmt->GetRootNode() == call); fgMorphStmt->SetRootNode(gtNewNothingNode()); } } } //------------------------------------------------------------------------------ // fgMorphCallInlineHelper: Helper to attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // result - result to set to success or failure // createdContext - The context that was created if the inline attempt got to the inliner. // // Notes: // Attempts to inline the call. // // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. // // If a context was created because we got to the importer then it is output by this function. // If the inline succeeded, this context will already be marked as successful. If it failed and // a context is returned, then it will not have been marked as success or failed. void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext) { // Don't expect any surprises here. assert(result->IsCandidate()); if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING) { // For now, attributing this to call site, though it's really // more of a budget issue (lvaCount currently includes all // caller and prospective callee locals). We still might be // able to inline other callees into this caller, or inline // this callee in other callers. 
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } if (call->IsVirtual()) { result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL); return; } // Re-check this because guarded devirtualization may allow these through. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } // impMarkInlineCandidate() is expected not to mark tail prefixed calls // and recursive tail calls as inline candidates. noway_assert(!call->IsTailPrefixedCall()); noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call)); // // Calling inlinee's compiler to inline the method. // unsigned startVars = lvaCount; #ifdef DEBUG if (verbose) { printf("Expanding INLINE_CANDIDATE in statement "); printStmtID(fgMorphStmt); printf(" in " FMT_BB ":\n", compCurBB->bbNum); gtDispStmt(fgMorphStmt); if (call->IsImplicitTailCall()) { printf("Note: candidate is implicit tail call\n"); } } #endif impInlineRoot()->m_inlineStrategy->NoteAttempt(result); // // Invoke the compiler to inline the call. // fgInvokeInlineeCompiler(call, result, createdContext); if (result->IsFailure()) { // Undo some changes made in anticipation of inlining... // Zero out the used locals memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable)); for (unsigned i = startVars; i < lvaCount; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. } lvaCount = startVars; #ifdef DEBUG if (verbose) { // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount); } #endif return; } #ifdef DEBUG if (verbose) { // printf("After inlining lvaCount=%d.\n", lvaCount); } #endif } //------------------------------------------------------------------------ // fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp. // // Arguments: // callee - The callee to check // failReason - If this method returns false, the reason why. Can be nullptr. // // Return Value: // Returns true or false based on whether the callee can be fastTailCalled // // Notes: // This function is target specific and each target will make the fastTailCall // decision differently. See the notes below. // // This function calls fgInitArgInfo() to initialize the arg info table, which // is used to analyze the argument. This function can alter the call arguments // by adding argument IR nodes for non-standard arguments. // // Windows Amd64: // A fast tail call can be made whenever the number of callee arguments // is less than or equal to the number of caller arguments, or we have four // or fewer callee arguments. This is because, on Windows AMD64, each // argument uses exactly one register or one 8-byte stack slot. Thus, we only // need to count arguments, and not be concerned with the size of each // incoming or outgoing argument. 
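// A worked instance of that counting rule (illustrative signatures only, not from
// any particular compilation): with the first four args in registers and one 8-byte
// slot per stack arg, caller(int, int, int, int, int, int) reserves 2 stack slots =
// 16 bytes of incoming arg space, while callee(int, int, int, int, int, int, int)
// would need 3 slots = 24 bytes; since 24 > 16 the callee's args do not fit in the
// caller's incoming arg area and such a call could not be fast tail called.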
// // Can fast tail call examples (amd64 Windows): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal or less than the caller -- // caller(struct, struct, struct, struct, struct, struct) // callee(int, int, int, int, int, int) // // -- Callee requires stack space that is less than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int) // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Windows): // // -- Callee requires stack space that is larger than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int, double, double, double) // // -- Callee has a byref struct argument -- // caller(int, int, int) // callee(struct(size 3 bytes)) // // Unix Amd64 && Arm64: // A fastTailCall decision can be made whenever the callee's stack space is // less than or equal to the caller's stack space. There are many permutations // of when the caller and callee have different stack sizes if there are // structs being passed to either the caller or callee. // // Exceptions: // If the callee has a 9 to 16 byte struct argument and the callee has // stack arguments, the decision will be to not fast tail call. This is // because before fgMorphArgs is done, it is not known whether the struct // will be placed on the stack or enregistered. Therefore, the conservative // decision not to fast tail call is taken. This limitation should be // removed if/when fgMorphArgs no longer depends on fgCanFastTailCall. // // Can fast tail call examples (amd64 Unix): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal to the caller -- // caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte // stack // space // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee requires stack space that is less than the caller -- // caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte // stack // space // callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Unix): // // -- Callee requires stack space that is larger than the caller -- // caller(float, float, float, float, float, float, float, float) -- 8 float register arguments // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee has structs which cannot be enregistered (Implementation Limitation) -- // caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register // arguments, 24 byte stack space // callee({ double, double, double }) -- 24 bytes stack space // // -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) -- // caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space // callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space // // -- Caller requires stack
space and nCalleeArgs > nCallerArgs (Bug) -- // caller({ double, double, double, double, double, double }) // 48 byte stack // callee(int, int) -- 2 int registers bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) { #if FEATURE_FASTTAILCALL // To reach here means that the return types of the caller and callee are tail call compatible. // In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (callee->IsTailPrefixedCall()) { var_types retType = info.compRetType; assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv, (var_types)callee->gtReturnType, callee->gtRetClsHnd, callee->GetUnmanagedCallConv())); } #endif assert(!callee->AreArgsComplete()); fgInitArgInfo(callee); fgArgInfo* argInfo = callee->fgArgInfo; unsigned calleeArgStackSize = 0; unsigned callerArgStackSize = info.compArgStackSize; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment()); calleeArgStackSize += arg->GetStackByteSize(); } calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize); auto reportFastTailCallDecision = [&](const char* thisFailReason) { if (failReason != nullptr) { *failReason = thisFailReason; } #ifdef DEBUG if ((JitConfig.JitReportFastTailCallDecisions()) == 1) { if (callee->gtCallType != CT_INDIRECT) { const char* methodName; methodName = eeGetMethodFullName(callee->gtCallMethHnd); printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ", info.compFullName, methodName); } else { printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- " "Decision: ", info.compFullName); } if (thisFailReason == nullptr) { printf("Will fast tailcall"); } else { printf("Will not fast tailcall (%s)", thisFailReason); } printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize); } else { if (thisFailReason == nullptr) { JITDUMP("[Fast tailcall decision]: Will fast tailcall\n"); } else { JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason); } } #endif // DEBUG }; if (!opts.compFastTailCalls) { reportFastTailCallDecision("Configuration doesn't allow fast tail calls"); return false; } if (callee->IsStressTailCall()) { reportFastTailCallDecision("Fast tail calls are not performed under tail call stress"); return false; } // Note on vararg methods: // If the caller is vararg method, we don't know the number of arguments passed by caller's caller. // But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its // fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as // out-going area required for callee is bounded by caller's fixed argument space. // // Note that callee being a vararg method is not a problem since we can account the params being passed. // // We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg // method. This is due to the ABI differences for native vararg methods for these platforms. There is // work required to shuffle arguments to the correct locations. 
CLANG_FORMAT_COMMENT_ANCHOR; if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs())) { reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64"); return false; } if (compLocallocUsed) { reportFastTailCallDecision("Localloc used"); return false; } #ifdef TARGET_AMD64 // Needed for Jit64 compat. // In the future, enabling fast tail calls from methods that need GS cookie // check would require codegen side work to emit GS cookie check before a // tail call. if (getNeedsGSSecurityCookie()) { reportFastTailCallDecision("GS Security cookie check required"); return false; } #endif // If the NextCallReturnAddress intrinsic is used we should do normal calls. if (info.compHasNextCallRetAddr) { reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic"); return false; } if (callee->HasRetBufArg()) // RetBuf { // If callee has RetBuf param, caller too must have it. // Otherwise go the slow route. if (info.compRetBuffArg == BAD_VAR_NUM) { reportFastTailCallDecision("Callee has RetBuf but caller does not."); return false; } } // For a fast tail call the caller will use its incoming arg stack space to place // arguments, so if the callee requires more arg stack space than is available here // the fast tail call cannot be performed. This is common to all platforms. // Note that the GC'ness of on stack args need not match since the arg setup area is marked // as non-interruptible for fast tail calls. if (calleeArgStackSize > callerArgStackSize) { reportFastTailCallDecision("Not enough incoming arg space"); return false; } // On Windows, some struct parameters are copied on the local frame // and then passed by reference. We cannot fast tail call in these situations // as we need to keep our frame around. if (fgCallHasMustCopyByrefParameter(callee)) { reportFastTailCallDecision("Callee has a byref parameter"); return false; } reportFastTailCallDecision(nullptr); return true; #else // FEATURE_FASTTAILCALL if (failReason) *failReason = "Fast tailcalls are not supported on this platform"; return false; #endif } //------------------------------------------------------------------------ // fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that // requires a struct copy in the caller. // // Arguments: // callee - The callee to check // // Return Value: // Returns true or false based on whether this call has a byref parameter that // requires a struct copy in the caller. #if FEATURE_FASTTAILCALL bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee) { fgArgInfo* argInfo = callee->fgArgInfo; bool hasMustCopyByrefParameter = false; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); if (arg->isStruct) { if (arg->passedByRef) { // Generally a byref arg will block tail calling, as we have to // make a local copy of the struct for the callee. hasMustCopyByrefParameter = true; // If we're optimizing, we may be able to pass our caller's byref to our callee, // and so still be able to avoid a struct copy. if (opts.OptimizationEnabled()) { // First, see if this arg is an implicit byref param. GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { // Yes, the arg is an implicit byref param.
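// (Background sketch, not exhaustive: on Windows x64 and on ARM64, a struct that
// cannot be passed by value in registers/stack slots is instead passed as a pointer
// to a caller-allocated copy -- an "implicit byref". That copy is exactly what
// normally blocks the fast tail call above, and the aliasing analysis below tries
// to prove the copy unnecessary so the caller's own byref can be forwarded.)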
const unsigned lclNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lcl); // The param must not be promoted; if we've promoted, then the arg will be // a local struct assembled from the promoted fields. if (varDsc->lvPromoted) { JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n", dspTreeID(arg->GetNode()), lclNum); } else { JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n", dspTreeID(arg->GetNode()), lclNum); // We have to worry about introducing aliases if we bypass copying // the struct at the call. We'll do some limited analysis to see if we // can rule this out. const unsigned argLimit = 6; // If this is the only appearance of the byref in the method, then // aliasing is not possible. // // If no other call arg refers to this byref, and no other arg is // a pointer which could refer to this byref, we can optimize. // // We only check this for calls with small numbers of arguments, // as the analysis cost will be quadratic. // const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); if (totalAppearances == 1) { JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum); hasMustCopyByrefParameter = false; } else if (totalAppearances > callAppearances) { // lvRefCntWtd tracks the number of appearances of the arg at call sites. // If this number doesn't match the regular ref count, there is // a non-call appearance, and we must be conservative. // JITDUMP("... no, arg has %u non-call appearance(s)\n", totalAppearances - callAppearances); } else if (argInfo->ArgCount() <= argLimit) { JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n" "... Running alias analysis on this call's args\n", totalAppearances); GenTree* interferingArg = nullptr; for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2) { if (index2 == index) { continue; } fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false); JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode())); DISPTREE(arg2->GetNode()); // Do we pass 'lcl' more than once to the callee? if (arg2->isStruct && arg2->passedByRef) { GenTreeLclVarCommon* const lcl2 = arg2->GetNode()->IsImplicitByrefParameterValue(this); if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum())) { // not copying would introduce aliased implicit byref structs // in the callee ... we can't optimize. interferingArg = arg2->GetNode(); break; } else { JITDUMP("... arg refers to different implicit byref V%02u\n", lcl2->GetLclNum()); continue; } } // Do we pass a byref pointer which might point within 'lcl'? // // We can assume the 'lcl' is unaliased on entry to the // method, so the only way we can have an aliasing byref pointer at // the call is if 'lcl' is address taken/exposed in the method. // // Note even though 'lcl' is not promoted, we are in the middle // of the promote->rewrite->undo->(morph)->demote cycle, and so // might see references to promoted fields of 'lcl' that haven't yet // been demoted (see fgMarkDemotedImplicitByRefArgs). // // So, we also need to scan all 'lcl's fields, if any, to see if they // are exposed. // // When looking for aliases from other args, we check for both TYP_BYREF // and TYP_I_IMPL typed args here. 
Conceptually anything that points into // an implicit byref parameter should be TYP_BYREF, as these parameters could // refer to boxed heap locations (say if the method is invoked by reflection) // but there are some stack only structs (like typed references) where // the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will // transiently retype all simple address-of implicit parameter args as // TYP_I_IMPL. // if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL)) { JITDUMP("...arg is a byref, must run an alias check\n"); bool checkExposure = true; bool hasExposure = false; // See if there is any way arg could refer to a parameter struct. GenTree* arg2Node = arg2->GetNode(); if (arg2Node->OperIs(GT_LCL_VAR)) { GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon(); assert(arg2LclNode->GetLclNum() != lclNum); LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode); // Other params can't alias implicit byref params if (arg2Dsc->lvIsParam) { checkExposure = false; } } // Because we're checking TYP_I_IMPL above, at least // screen out obvious things that can't cause aliases. else if (arg2Node->IsIntegralConst()) { checkExposure = false; } if (checkExposure) { JITDUMP( "... not sure where byref arg points, checking if V%02u is exposed\n", lclNum); // arg2 might alias arg, see if we've exposed // arg somewhere in the method. if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed()) { // Struct as a whole is exposed, can't optimize JITDUMP("... V%02u is exposed\n", lclNum); hasExposure = true; } else if (varDsc->lvFieldLclStart != 0) { // This is the promoted/undone struct case. // // The field start is actually the local number of the promoted local, // use it to enumerate the fields. const unsigned promotedLcl = varDsc->lvFieldLclStart; LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl); JITDUMP("...promoted-unpromoted case -- also checking exposure of " "fields of V%02u\n", promotedLcl); for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt; fieldIndex++) { LclVarDsc* fieldDsc = lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex); if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed()) { // Promoted and not yet demoted field is exposed, can't optimize JITDUMP("... field V%02u is exposed\n", promotedVarDsc->lvFieldLclStart + fieldIndex); hasExposure = true; break; } } } } if (hasExposure) { interferingArg = arg2->GetNode(); break; } } else { JITDUMP("...arg is not a byref or implicit byref (%s)\n", varTypeName(arg2->GetNode()->TypeGet())); } } if (interferingArg != nullptr) { JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg), lclNum); } else { JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum); hasMustCopyByrefParameter = false; } } else { JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n", argInfo->ArgCount(), argLimit); } } } } if (hasMustCopyByrefParameter) { // This arg requires a struct copy. No reason to keep scanning the remaining args. break; } } } } return hasMustCopyByrefParameter; } #endif //------------------------------------------------------------------------ // fgMorphPotentialTailCall: Attempt to morph a call that the importer has // identified as a potential tailcall to an actual tailcall and return the // placeholder node to use in this case. // // Arguments: // call - The call to morph. // // Return Value: // Returns a node to use if the call was morphed into a tailcall. 
If this // function returns a node the call is done being morphed and the new node // should be used. Otherwise the call will have been demoted to a regular call // and should go through normal morph. // // Notes: // This is called only for calls that the importer has already identified as // potential tailcalls. It will do profitability and legality checks and // classify which kind of tailcall we are able to (or should) do, along with // modifying the trees to perform that kind of tailcall. // GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // It should either be an explicit (i.e. tail prefixed) or an implicit tail call assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall()); // It cannot be an inline candidate assert(!call->IsInlineCandidate()); auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) { #ifdef DEBUG if (verbose) { printf("\nRejecting tail call in morph for call "); printTreeID(call); printf(": %s", reason); if (lclNum != BAD_VAR_NUM) { printf(" V%02u", lclNum); } printf("\n"); } #endif // for non user funcs, we have no handles to report info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), TAILCALL_FAIL, reason); // We have checked the candidate so demote. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif }; if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { failTailCall("Might turn into an intrinsic"); return nullptr; } if (call->IsNoReturn() && !call->IsTailPrefixedCall()) { // Such tail calls always throw an exception and we won't be able to see current // Caller() in the stacktrace. failTailCall("Never returns"); return nullptr; } #ifdef DEBUG if (opts.compGcChecks && (info.compRetType == TYP_REF)) { failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, " "invalidating tailcall opportunity"); return nullptr; } #endif // We have to ensure to pass the incoming retValBuf as the // outgoing one. Using a temp will not do as this function will // not regain control to do the copy. This can happen when inlining // a tailcall which also has a potential tailcall in it: the IL looks // like we can do a tailcall, but the trees generated use a temp for the inlinee's // result. TODO-CQ: Fix this. if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(call->TypeGet() == TYP_VOID); GenTree* retValBuf = call->gtCallArgs->GetNode(); if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg) { failTailCall("Need to copy return buffer"); return nullptr; } } // We are still not sure whether it can be a tail call. Because, when converting // a call to an implicit tail call, we must check that there are no locals with // their address taken. If this is the case, we have to assume that the address // has been leaked and the current stack frame must live until after the final // call. // Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note // that lvHasLdAddrOp is much more conservative. We cannot just base it on // IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs // during morph stage. The reason for also checking IsAddressExposed() is that in case // of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp. 
// The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us // never to be incorrect. // // TODO-Throughput: have a compiler level flag to indicate whether method has vars whose // address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed() // is set. This avoids the need for iterating through all lcl vars of the current // method. Right now throughout the code base we are not consistently using 'set' // method to set lvHasLdAddrOp and IsAddressExposed() flags. bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall(); if (isImplicitOrStressTailCall && compLocallocUsed) { failTailCall("Localloc used"); return nullptr; } bool hasStructParam = false; for (unsigned varNum = 0; varNum < lvaCount; varNum++) { LclVarDsc* varDsc = lvaGetDesc(varNum); // If the method is marked as an explicit tail call we will skip the // following three hazard checks. // We still must check for any struct parameters and set 'hasStructParam' // so that we won't transform the recursive tail call into a loop. // if (isImplicitOrStressTailCall) { if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Local address taken", varNum); return nullptr; } if (varDsc->IsAddressExposed()) { if (lvaIsImplicitByRefLocal(varNum)) { // The address of the implicit-byref is a non-address use of the pointer parameter. } else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl)) { // The address of the implicit-byref's field is likewise a non-address use of the pointer // parameter. } else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum)) { // This temp was used for struct promotion bookkeeping. It will not be used, and will have // its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs. assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl)); assert(fgGlobalMorph); } else { failTailCall("Local address taken", varNum); return nullptr; } } if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Has Struct Promoted Param", varNum); return nullptr; } if (varDsc->lvPinned) { // A tail call removes the method from the stack, which means the pinning // goes away for the callee. We can't allow that. failTailCall("Has Pinned Vars", varNum); return nullptr; } } if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam) { hasStructParam = true; // This prevents transforming a recursive tail call into a loop // but doesn't prevent tail call optimization so we need to // look at the rest of parameters. } } if (!fgCheckStmtAfterTailCall()) { failTailCall("Unexpected statements after the tail call"); return nullptr; } const char* failReason = nullptr; bool canFastTailCall = fgCanFastTailCall(call, &failReason); CORINFO_TAILCALL_HELPERS tailCallHelpers; bool tailCallViaJitHelper = false; if (!canFastTailCall) { if (call->IsImplicitTailCall()) { // Implicit or opportunistic tail calls are always dispatched via fast tail call // mechanism and never via tail call helper for perf. failTailCall(failReason); return nullptr; } assert(call->IsTailPrefixedCall()); assert(call->tailCallInfo != nullptr); // We do not currently handle non-standard args except for VSD stubs. 
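// ("Non-standard" args here are extra arguments passed in fixed registers outside
// the normal calling convention -- the virtual stub dispatch (VSD) cell parameter
// is one example; the precise set is whatever HasNonStandardAddedArgs reports.)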
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this)) { failTailCall( "Method with non-standard args passed in callee trash register cannot be tail called via helper"); return nullptr; } // On x86 we have a faster mechanism than the general one which we use // in almost all cases. See fgCanTailCallViaJitHelper for more information. if (fgCanTailCallViaJitHelper()) { tailCallViaJitHelper = true; } else { // Make sure we can get the helpers. We do this last as the runtime // will likely be required to generate these. CORINFO_RESOLVED_TOKEN* token = nullptr; CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig(); unsigned flags = 0; if (!call->tailCallInfo->IsCalli()) { token = call->tailCallInfo->GetToken(); if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_TAILCALL_IS_CALLVIRT; } } if (call->gtCallThisArg != nullptr) { var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet(); if (thisArgType != TYP_REF) { flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF; } } if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags, &tailCallHelpers)) { failTailCall("Tail call help not available"); return nullptr; } } } // Check if we can make the tailcall a loop. bool fastTailCallToLoop = false; #if FEATURE_TAILCALL_OPT // TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register // or return type is a struct that can be passed in a register. // // TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through // hidden generic context param or through keep alive thisptr), then transforming a recursive // call to such a method requires that the generic context stored on the stack slot be updated. Right now, // fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming // a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the // generic type parameters of both caller and callee generic method are the same. if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() && !lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet())) { fastTailCallToLoop = true; } #endif // Ok -- now we are committed to performing a tailcall. Report the decision. CorInfoTailCall tailCallResult; if (fastTailCallToLoop) { tailCallResult = TAILCALL_RECURSIVE; } else if (canFastTailCall) { tailCallResult = TAILCALL_OPTIMIZED; } else { tailCallResult = TAILCALL_HELPER; } info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), tailCallResult, nullptr); // Are we currently planning to expand the gtControlExpr as an early virtual call target? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // It isn't always profitable to expand a virtual call early // // We always expand the TAILCALL_HELPER type late. // And we expand late when we have an optimized tail call // and the this pointer needs to be evaluated into a temp. // if (tailCallResult == TAILCALL_HELPER) { // We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work // for us to be able to expand this earlier in morph) // call->ClearExpandedEarly(); } else if ((tailCallResult == TAILCALL_OPTIMIZED) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0)) { // We generate better code when we expand this late in lower instead. // call->ClearExpandedEarly(); } } // Now actually morph the call. compTailCallUsed = true; // This will prevent inlining this call. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL; if (tailCallViaJitHelper) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; } #if FEATURE_TAILCALL_OPT if (fastTailCallToLoop) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP; } #endif // Mark that this is no longer a pending tailcall. We need to do this before // we call fgMorphCall again (which happens in the fast tailcall case) to // avoid recursing back into this method. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif #ifdef DEBUG if (verbose) { printf("\nGTF_CALL_M_TAILCALL bit set for call "); printTreeID(call); printf("\n"); if (fastTailCallToLoop) { printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call "); printTreeID(call); printf("\n"); } } #endif // For R2R we might need a different entry point for this call if we are doing a tailcall. // The reason is that the normal delay load helper uses the return address to find the indirection // cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM: // We optimize delegate invocations manually in the JIT so skip this for those. if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke()) { info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint); #ifdef TARGET_XARCH // We have already computed arg info to make the fast tailcall decision, but on X64 we now // have to pass the indirection cell, so redo arg info. call->ResetArgInfo(); #endif } // If this block has a flow successor, make suitable updates. // BasicBlock* const nextBlock = compCurBB->GetUniqueSucc(); if (nextBlock == nullptr) { // No unique successor. compCurBB should be a return. // assert(compCurBB->bbJumpKind == BBJ_RETURN); } else { // Flow no longer reaches nextBlock from here. // fgRemoveRefPred(nextBlock, compCurBB); // Adjust profile weights. // // Note if this is a tail call to loop, further updates // are needed once we install the loop edge. // if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight()) { // Since we have linear flow we can update the next block weight. // weight_t const blockWeight = compCurBB->bbWeight; weight_t const nextWeight = nextBlock->bbWeight; weight_t const newNextWeight = nextWeight - blockWeight; // If the math would result in a negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextWeight >= 0) { // Note if we'd already morphed the IR in nextblock we might // have done something profile sensitive that we should arguably reconsider. 
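// Worked example with illustrative weights: if compCurBB has weight 70 and
// nextBlock has weight 100, then once the tail call no longer falls through, the
// repair below leaves nextBlock with 100 - 70 = 30; had compCurBB's weight been
// 120, the result would be negative and the weights are deliberately left
// inconsistent instead.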
// JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum, nextWeight, newNextWeight); nextBlock->setBBProfileWeight(newNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight); } // If nextBlock is not a BBJ_RETURN, it should have a unique successor that // is a BBJ_RETURN, as we allow a little bit of flow after a tail call. // if (nextBlock->bbJumpKind != BBJ_RETURN) { BasicBlock* retBlock = nextBlock->GetUniqueSucc(); // Check if we have a sequence of GT_ASG blocks where the same variable is assigned // to temp locals over and over. // Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs. // // { GT_ASG(t_0, GT_CALL(...)) } // { GT_ASG(t_1, t0) } (with casts on rhs potentially) // ... // { GT_ASG(t_n, t_(n - 1)) } // { GT_RET t_n } // if (retBlock->bbJumpKind != BBJ_RETURN) { // Make sure the block has a single statement assert(nextBlock->firstStmt() == nextBlock->lastStmt()); // And the root node is "ASG(LCL_VAR, LCL_VAR)" GenTree* asgNode = nextBlock->firstStmt()->GetRootNode(); assert(asgNode->OperIs(GT_ASG)); unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); while (retBlock->bbJumpKind != BBJ_RETURN) { #ifdef DEBUG Statement* nonEmptyStmt = nullptr; for (Statement* const stmt : retBlock->Statements()) { // Ignore NOP statements if (!stmt->GetRootNode()->OperIs(GT_NOP)) { // Only a single non-NOP statement is allowed assert(nonEmptyStmt == nullptr); nonEmptyStmt = stmt; } } if (nonEmptyStmt != nullptr) { asgNode = nonEmptyStmt->GetRootNode(); if (!asgNode->OperIs(GT_NOP)) { assert(asgNode->OperIs(GT_ASG)); GenTree* rhs = asgNode->gtGetOp2(); while (rhs->OperIs(GT_CAST)) { assert(!rhs->gtOverflow()); rhs = rhs->gtGetOp1(); } assert(lcl == rhs->AsLclVarCommon()->GetLclNum()); lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); } } #endif retBlock = retBlock->GetUniqueSucc(); } } assert(retBlock->bbJumpKind == BBJ_RETURN); if (retBlock->hasProfileWeight()) { // Do similar updates here. // weight_t const nextNextWeight = retBlock->bbWeight; weight_t const newNextNextWeight = nextNextWeight - blockWeight; // If the math would result in an negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextNextWeight >= 0) { JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", retBlock->bbNum, nextNextWeight, newNextNextWeight); retBlock->setBBProfileWeight(newNextNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight); } } } } } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // We enable shared-ret tail call optimization for recursive calls even if // FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined. if (gtIsRecursiveCall(call)) #endif { // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. compCurBB->bbJumpKind = BBJ_RETURN; } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); #ifdef DEBUG // Tail call needs to be in one of the following IR forms // Either a call stmt or // GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..))) // var = GT_CALL(..) 
or var = (GT_CAST(GT_CALL(..))) // GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP) // In the above, // GT_CASTS may be nested. genTreeOps stmtOper = stmtExpr->gtOper; if (stmtOper == GT_CALL) { assert(stmtExpr == call); } else { assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA); GenTree* treeWithCall; if (stmtOper == GT_RETURN) { treeWithCall = stmtExpr->gtGetOp1(); } else if (stmtOper == GT_COMMA) { // Second operation must be nop. assert(stmtExpr->gtGetOp2()->IsNothingNode()); treeWithCall = stmtExpr->gtGetOp1(); } else { treeWithCall = stmtExpr->gtGetOp2(); } // Peel off casts while (treeWithCall->gtOper == GT_CAST) { assert(!treeWithCall->gtOverflow()); treeWithCall = treeWithCall->gtGetOp1(); } assert(treeWithCall == call); } #endif // Store the call type for later to introduce the correct placeholder. var_types origCallType = call->TypeGet(); GenTree* result; if (!canFastTailCall && !tailCallViaJitHelper) { // For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular // calls with (to the JIT) regular control flow so we do not need to do // much special handling. result = fgMorphTailCallViaHelpers(call, tailCallHelpers); } else { // Otherwise we will transform into something that does not return. For // fast tailcalls a "jump" and for tailcall via JIT helper a call to a // JIT helper that does not return. So peel off everything after the // call. Statement* nextMorphStmt = fgMorphStmt->GetNextStmt(); JITDUMP("Remove all stmts after the call.\n"); while (nextMorphStmt != nullptr) { Statement* stmtToRemove = nextMorphStmt; nextMorphStmt = stmtToRemove->GetNextStmt(); fgRemoveStmt(compCurBB, stmtToRemove); } bool isRootReplaced = false; GenTree* root = fgMorphStmt->GetRootNode(); if (root != call) { JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call)); isRootReplaced = true; fgMorphStmt->SetRootNode(call); } // Avoid potential extra work for the return (for example, vzeroupper) call->gtType = TYP_VOID; // The runtime requires that we perform a null check on the `this` argument before // tail calling to a virtual dispatch stub. This requirement is a consequence of limitations // in the runtime's ability to map an AV to a NullReferenceException if // the AV occurs in a dispatch stub that has an unmanaged caller. if (call->IsVirtualStub()) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Do some target-specific transformations (before we process the args, // etc.) for the JIT helper case. if (tailCallViaJitHelper) { fgMorphTailCallViaJitHelper(call); // Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the // argument list, invalidating the argInfo. call->fgArgInfo = nullptr; } // Tail call via JIT helper: The VM can't use return address hijacking // if we're not going to return and the helper doesn't have enough info // to safely poll, so we poll before the tail call, if the block isn't // already safe. Since tail call via helper is a slow mechanism it // doesn't matter whether we emit a GC poll. This is done to be in parity // with Jit64. Also this avoids GC info size increase if almost all // methods are expected to be tail calls (e.g. F#). // // Note that we can avoid emitting GC-poll if we know that the current // BB is dominated by a Gc-SafePoint block. But we don't have dominator // info at this point. One option is to just add a place holder node for // GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block // is dominated by a GC-SafePoint.
For now it is not clear whether // optimizing slow tail calls is worth the effort. As a low cost check, // we check whether the first and current basic blocks are // GC-SafePoints. // // Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead, // fgSetBlockOrder() is going to mark the method as fully interruptible // if the block containing this tail call is reachable without executing // any call. BasicBlock* curBlock = compCurBB; if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock)) { // We didn't insert a poll block, so we need to morph the call now // (Normally it will get morphed when we get to the split poll block) GenTree* temp = fgMorphCall(call); noway_assert(temp == call); } // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; } else { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need an epilogue. compCurBB->bbJumpKind = BBJ_THROW; } if (isRootReplaced) { // We have replaced the root node of this stmt and deleted the rest, // but we still have the deleted, dead nodes on the `fgMorph*` stack // if the root node was an `ASG`, `RET` or `CAST`. // Return a zero con node to exit morphing of the old trees without asserts // and forbid POST_ORDER morphing doing something wrong with our call. var_types callType; if (varTypeIsStruct(origCallType)) { CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference)); if (howToReturnStruct == SPK_ByValue) { callType = TYP_I_IMPL; } else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType)) { callType = TYP_FLOAT; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); } else { callType = origCallType; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); callType = genActualType(callType); GenTree* zero = gtNewZeroConNode(callType); result = fgMorphTree(zero); } else { result = call; } } return result; } //------------------------------------------------------------------------ // fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code // generation. // // Arguments: // call - The call to transform // helpers - The tailcall helpers provided by the runtime. // // Return Value: // Returns the transformed node. // // Notes: // This transforms // GT_CALL // {callTarget} // {this} // {args} // into // GT_COMMA // GT_CALL StoreArgsStub // {callTarget} (depending on flags provided by the runtime) // {this} (as a regular arg) // {args} // GT_COMMA // GT_CALL Dispatcher // GT_ADDR ReturnAddress // {CallTargetStub} // GT_ADDR ReturnValue // GT_LCL ReturnValue // whenever the call node returns a value. If the call node does not return a // value the last comma will not be there. // GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help) { // R2R requires different handling but we don't support tailcall via // helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper assert(!opts.IsReadyToRun()); JITDUMP("fgMorphTailCallViaHelpers (before):\n"); DISPTREE(call); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // We might or might not have called fgInitArgInfo before this point: in // builds with FEATURE_FASTTAILCALL we will have called it when checking if // we could do a fast tailcall, so it is possible we have added extra IR // for non-standard args that we must get rid of. Get rid of that IR here // and do this first as it will 'expose' the retbuf as the first arg, which // we rely upon in fgCreateCallDispatcherAndGetResult. call->ResetArgInfo(); GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher); // Change the call to a call to the StoreArgs stub. if (call->HasRetBufArg()) { JITDUMP("Removing retbuf"); call->gtCallArgs = call->gtCallArgs->GetNext(); call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG; } const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0; GenTree* doBeforeStoreArgsStub = nullptr; GenTree* thisPtrStubArg = nullptr; // Put 'this' in normal param list if (call->gtCallThisArg != nullptr) { JITDUMP("Moving this pointer into arg list\n"); GenTree* objp = call->gtCallThisArg->GetNode(); GenTree* thisPtr = nullptr; call->gtCallThisArg = nullptr; // JIT will need one or two copies of "this" in the following cases: // 1) the call needs a null check; // 2) the StoreArgs stub needs the target function pointer address and, if the call is virtual, // the stub also needs "this" in order to evaluate the target. const bool callNeedsNullCheck = call->NeedsNullCheck(); const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual(); // TODO-Review: The following transformation is implemented under the assumption that // both conditions can be true. However, I could not construct such an example // where a virtual tail call would require a null check. In case the conditions // are mutually exclusive, the following could be simplified. if (callNeedsNullCheck || stubNeedsThisPtr) { // Clone "this" if "this" has no side effects. if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0) { thisPtr = gtClone(objp, true); } // Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
if (thisPtr == nullptr) { const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); // tmp = "this" doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp); if (callNeedsNullCheck) { // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet()); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck); } thisPtr = gtNewLclvNode(lclNum, objp->TypeGet()); if (stubNeedsThisPtr) { thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet()); } } else { if (callNeedsNullCheck) { // deref("this") doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB); if (stubNeedsThisPtr) { thisPtrStubArg = gtClone(objp, true); } } else { assert(stubNeedsThisPtr); thisPtrStubArg = objp; } } call->gtFlags &= ~GTF_CALL_NULLCHECK; assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr); } else { thisPtr = objp; } // During rationalization tmp="this" and null check will be materialized // in the right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // We may need to pass the target, for instance for calli or generic methods // where we pass instantiating stub. if (stubNeedsTargetFnPtr) { JITDUMP("Adding target since VM requested it\n"); GenTree* target; if (!call->IsVirtual()) { if (call->gtCallType == CT_INDIRECT) { noway_assert(call->gtCallAddr != nullptr); target = call->gtCallAddr; } else { CORINFO_CONST_LOOKUP addrInfo; info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo); CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE); if (addrInfo.accessType == IAT_VALUE) { handle = addrInfo.handle; } else if (addrInfo.accessType == IAT_PVALUE) { pIndirection = addrInfo.addr; } target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd); } } else { assert(!call->tailCallInfo->GetSig()->hasTypeArg()); CORINFO_CALL_INFO callInfo; unsigned flags = CORINFO_CALLINFO_LDFTN; if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_CALLINFO_CALLVIRT; } eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo); target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo); } // Insert target as last arg GenTreeCall::Use** newArgSlot = &call->gtCallArgs; while (*newArgSlot != nullptr) { newArgSlot = &(*newArgSlot)->NextRef(); } *newArgSlot = gtNewCallArgs(target); } // This is now a direct call to the store args stub and not a tailcall. call->gtCallType = CT_USER_FUNC; call->gtCallMethHnd = help.hStoreArgs; call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV); // The store-args stub returns no value. 
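// As a concrete (hypothetical) instance of the shape documented in the function
// header: for a "tail. call" to int F(int x), the tree assembled below is roughly
//   COMMA(void CALL StoreArgsStub(x [, target]),
//         COMMA(void CALL Dispatcher(&retAddrSlot, &CallTargetStub, &retVal),
//               LCL_VAR retVal))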
call->gtRetClsHnd = nullptr; call->gtType = TYP_VOID; call->gtReturnType = TYP_VOID; GenTree* callStoreArgsStub = call; if (doBeforeStoreArgsStub != nullptr) { callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub); } GenTree* finalTree = gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult); finalTree = fgMorphTree(finalTree); JITDUMP("fgMorphTailCallViaHelpers (after):\n"); DISPTREE(finalTree); return finalTree; } //------------------------------------------------------------------------ // fgCreateCallDispatcherAndGetResult: Given a call // CALL // {callTarget} // {retbuf} // {this} // {args} // create a similarly typed node that calls the tailcall dispatcher and returns // the result, as in the following: // COMMA // CALL TailCallDispatcher // ADDR ReturnAddress // &CallTargetFunc // ADDR RetValue // RetValue // If the call has type TYP_VOID, only create the CALL node. // // Arguments: // origCall - the call // callTargetStubHnd - the handle of the CallTarget function (this is a special // IL stub created by the runtime) // dispatcherHnd - the handle of the tailcall dispatcher function // // Return Value: // A node that can be used in place of the original call. // GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd) { GenTreeCall* callDispatcherNode = gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo()); // The dispatcher has signature // void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue) // Add return value arg. GenTree* retValArg; GenTree* retVal = nullptr; unsigned int newRetLcl = BAD_VAR_NUM; GenTree* copyToRetBufNode = nullptr; if (origCall->HasRetBufArg()) { JITDUMP("Transferring retbuf\n"); GenTree* retBufArg = origCall->gtCallArgs->GetNode(); assert(info.compRetBuffArg != BAD_VAR_NUM); assert(retBufArg->OperIsLocal()); assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg); // Caller return buffer argument retBufArg can point to GC heap while the dispatcher expects // the return value argument retValArg to point to the stack. // We use a temporary stack allocated return buffer to hold the value during the dispatcher call // and copy the value back to the caller return buffer after that. 
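// Sketch of the resulting shape (names illustrative): the dispatcher writes into a
// stack temp and the value is then block-copied back to the caller's buffer, i.e.
// roughly COMMA(CALL Dispatcher(..., &tmpRetBuf), copyBlk(*callerRetBuf, tmpRetBuf)).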
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer")); constexpr bool unsafeValueClsCheck = false; lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck); lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet(); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType)); var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet(); GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType); GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr); GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType); constexpr bool isVolatile = false; constexpr bool isCopyBlock = true; copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock); if (origCall->gtType != TYP_VOID) { retVal = gtClone(retBufArg); } } else if (origCall->gtType != TYP_VOID) { JITDUMP("Creating a new temp for the return value\n"); newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher")); if (varTypeIsStruct(origCall->gtType)) { lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false); } else { // Since we pass a reference to the return value to the dispatcher // we need to use the real return type so we can normalize it on // load when we return it. lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType; } lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType))); retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)); if (varTypeIsStruct(origCall->gtType)) { retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv()); } } else { JITDUMP("No return value so using null pointer as arg\n"); retValArg = gtNewZeroConNode(TYP_I_IMPL); } callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs); // Add callTarget callDispatcherNode->gtCallArgs = gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd), callDispatcherNode->gtCallArgs); // Add the caller's return address slot. if (lvaRetAddrVar == BAD_VAR_NUM) { lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address")); lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL; lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); } GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL)); callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs); GenTree* finalTree = callDispatcherNode; if (copyToRetBufNode != nullptr) { finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode); } if (origCall->gtType == TYP_VOID) { return finalTree; } assert(retVal != nullptr); finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal); // The JIT seems to want to CSE this comma and messes up multi-reg ret // values in the process. Just avoid CSE'ing this tree entirely in that // case. 
if (origCall->HasMultiRegRetVal()) { finalTree->gtFlags |= GTF_DONT_CSE; } return finalTree; } //------------------------------------------------------------------------ // getLookupTree: get a lookup tree // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // handleFlags - flags to set on the result node // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the lookup tree // GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); } return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle); } //------------------------------------------------------------------------ // getRuntimeLookupTree: get a tree for a runtime lookup // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the runtime lookup tree // GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { assert(!compIsForInlining()); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be // used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array. if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull || pRuntimeLookup->testForFixup) { // If the first condition is true, runtime lookup tree is available only via the run-time helper function. // TODO-CQ If the second or third condition is true, we are always using the slow path since we can't // introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper. // The long-term solution is to introduce a new node representing a runtime lookup, create instances // of that node both in the importer and here, and expand the node in lower (introducing control flow if // necessary). 
        return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup,
                                                getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind),
                                                compileTimeHandle);
    }

    GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);

    ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack));

    auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* {
        if (!((*tree)->gtFlags & GTF_GLOB_EFFECT))
        {
            GenTree* clone = gtClone(*tree, true);
            if (clone)
            {
                return clone;
            }
        }
        unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
        stmts.Push(gtNewTempAssign(temp, *tree));
        *tree = gtNewLclvNode(temp, lvaGetActualType(temp));
        return gtNewLclvNode(temp, lvaGetActualType(temp));
    };

    // Apply repeated indirections
    for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
    {
        GenTree* preInd = nullptr;
        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset"));
        }

        if (i != 0)
        {
            result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
            result->gtFlags |= GTF_IND_NONFAULTING;
            result->gtFlags |= GTF_IND_INVARIANT;
        }

        if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
        {
            result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result);
        }

        if (pRuntimeLookup->offsets[i] != 0)
        {
            result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
        }
    }

    assert(!pRuntimeLookup->testForNull);
    if (pRuntimeLookup->indirections > 0)
    {
        assert(!pRuntimeLookup->testForFixup);
        result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
        result->gtFlags |= GTF_IND_NONFAULTING;
    }

    // Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result)))
    while (!stmts.Empty())
    {
        result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result);
    }

    DISPTREE(result);
    return result;
}

//------------------------------------------------------------------------
// getVirtMethodPointerTree: get a tree for a virtual method pointer
//
// Arguments:
//    thisPtr - tree representing `this` pointer
//    pResolvedToken - pointer to the resolved token of the method
//    pCallInfo - pointer to call info
//
// Return Value:
//    A node representing the virtual method pointer
//
GenTree* Compiler::getVirtMethodPointerTree(GenTree*                thisPtr,
                                            CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                            CORINFO_CALL_INFO*      pCallInfo)
{
    GenTree* exactTypeDesc   = getTokenHandleTree(pResolvedToken, true);
    GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false);

    GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc);
    return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}

//------------------------------------------------------------------------
// getTokenHandleTree: get a handle tree for a token
//
// Arguments:
//    pResolvedToken - token to get a handle for
//    parent - whether parent should be imported
//
// Return Value:
//    A node representing the token handle
//
GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent)
{
    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo);

    GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                                    embedInfo.compileTimeHandle);

    // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } /***************************************************************************** * * Transform the given GT_CALL tree for tail call via JIT helper. */ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) { JITDUMP("fgMorphTailCallViaJitHelper (before):\n"); DISPTREE(call); // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning or end. // // For x86, the tailcall helper is defined as: // // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // // Note that the special arguments are on the stack, whereas the function arguments follow // the normal convention: there might be register arguments in ECX and EDX. The stack will // look like (highest address at the top): // first normal stack argument // ... // last normal stack argument // numberOfOldStackArgs // numberOfNewStackArgs // flags // callTarget // // Each special arg is 4 bytes. // // 'flags' is a bitmask where: // 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all // callee-saved registers for tailcall functions. Note that the helper assumes // that the callee-saved registers live immediately below EBP, and must have been // pushed in this order: EDI, ESI, EBX. // 2 == call target is a virtual stub dispatch. // // The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details // on the custom calling convention. // Check for PInvoke call types that we don't handle in codegen yet. assert(!call->IsUnmanaged()); assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr)); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // First move the 'this' pointer (if any) onto the regular arg list. We do this because // we are going to prepend special arguments onto the argument list (for non-x86 platforms), // and thus shift where the 'this' pointer will be passed to a later argument slot. In // addition, for all platforms, we are going to change the call into a helper call. Our code // generation code for handling calls to helpers does not handle 'this' pointers. So, when we // do this transformation, we must explicitly create a null 'this' pointer check, if required, // since special 'this' pointer handling will no longer kick in. // // Some call types, such as virtual vtable calls, require creating a call address expression // that involves the "this" pointer. Lowering will sometimes create an embedded statement // to create a temporary that is assigned to the "this" pointer expression, and then use // that temp to create the call address expression. This temp creation embedded statement // will occur immediately before the "this" pointer argument, and then will be used for both // the "this" pointer argument as well as the call address expression. In the normal ordering, // the embedded statement establishing the "this" pointer temp will execute before both uses // of the temp. 
However, for tail calls via a helper, we move the "this" pointer onto the // normal call argument list, and insert a placeholder which will hold the call address // expression. For non-x86, things are ok, because the order of execution of these is not // altered. However, for x86, the call address expression is inserted as the *last* argument // in the argument list, *after* the "this" pointer. It will be put on the stack, and be // evaluated first. To ensure we don't end up with out-of-order temp definition and use, // for those cases where call lowering creates an embedded form temp of "this", we will // create a temp here, early, that will later get morphed correctly. if (call->gtCallThisArg != nullptr) { GenTree* thisPtr = nullptr; GenTree* objp = call->gtCallThisArg->GetNode(); call->gtCallThisArg = nullptr; if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR)) { // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", tmp) var_types vt = objp->TypeGet(); GenTree* tmp = gtNewLclvNode(lclNum, vt); thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp); objp = thisPtr; } if (call->NeedsNullCheck()) { // clone "this" if "this" has no side effects. if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT)) { thisPtr = gtClone(objp, true); } var_types vt = objp->TypeGet(); if (thisPtr == nullptr) { // create a temp if either "this" has side effects or "this" is too complex to clone. // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, vt); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck); // COMMA(COMMA(tmp = "this", deref(tmp)), tmp) thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt)); } else { // thisPtr = COMMA(deref("this"), "this") GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB); thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true)); } call->gtFlags &= ~GTF_CALL_NULLCHECK; } else { thisPtr = objp; } // TODO-Cleanup: we leave it as a virtual stub call to // use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here // and change `LowerCall` to recognize it as a direct call. // During rationalization tmp="this" and null check will // materialize as embedded stmts in right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will // append to the list. GenTreeCall::Use** ppArg = &call->gtCallArgs; for (GenTreeCall::Use& use : call->Args()) { ppArg = &use.NextRef(); } assert(ppArg != nullptr); assert(*ppArg == nullptr); unsigned nOldStkArgsWords = (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES; GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate. // The constant will be replaced. GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the flags. // The constant will be replaced. 
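    // Illustration (not a real dump): at this point the argument list already ends with
    //    ..., <last IL arg>, numberOfOldStackArgs, <numberOfNewStackArgs placeholder>
    // and the two remaining special args - the flags and the call target - are
    // appended below, completing the JIT_TailCall layout documented above.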
    GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL);
    *ppArg        = gtNewCallArgs(arg1);
    ppArg         = &((*ppArg)->NextRef());

    // Inject a placeholder for the real call target that the Lowering phase will generate.
    // The constant will be replaced.
    GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL);
    *ppArg        = gtNewCallArgs(arg0);

    // It is now a varargs tail call.
    call->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
    call->gtFlags &= ~GTF_CALL_POP_ARGS;

    // The function is responsible for doing explicit null check when it is necessary.
    assert(!call->NeedsNullCheck());

    JITDUMP("fgMorphTailCallViaJitHelper (after):\n");
    DISPTREE(call);
}

//------------------------------------------------------------------------
// fgGetStubAddrArg: Return the virtual stub address for the given call.
//
// Notes:
//    the JIT must place the address of the stub used to load the call target,
//    the "stub indirection cell", in special call argument with special register.
//
// Arguments:
//    call - a call that needs virtual stub dispatching.
//
// Return Value:
//    addr tree with register requirements set.
//
GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call)
{
    assert(call->IsVirtualStub());
    GenTree* stubAddrArg;
    if (call->gtCallType == CT_INDIRECT)
    {
        stubAddrArg = gtClone(call->gtCallAddr, true);
    }
    else
    {
        assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
        ssize_t addr = ssize_t(call->gtStubCallStubAddr);
        stubAddrArg  = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
        stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
    }
    assert(stubAddrArg != nullptr);
    stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg());
    return stubAddrArg;
}

//------------------------------------------------------------------------------
// fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that
// corresponds to the argument to a recursive call.
//
// Notes:
//    Due to non-standard args this is not just fgArgTabEntry::argNum.
//    For example, in R2R compilations we will have added a non-standard
//    arg for the R2R indirection cell.
//
// Arguments:
//    argTabEntry - the arg
//
unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry)
{
    fgArgInfo*      argInfo  = call->fgArgInfo;
    unsigned        argCount = argInfo->ArgCount();
    fgArgTabEntry** argTable = argInfo->ArgTable();

    unsigned numToRemove = 0;
    for (unsigned i = 0; i < argCount; i++)
    {
        fgArgTabEntry* arg = argTable[i];
        // Late added args add extra args that do not map to IL parameters and that we should not reassign.
        if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate())
            continue;

        if (arg->argNum < argTabEntry->argNum)
            numToRemove++;
    }

    return argTabEntry->argNum - numToRemove;
}

//------------------------------------------------------------------------------
// fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop.
//
//
// Arguments:
//    block  - basic block ending with a recursive fast tail call
//    recursiveTailCall - recursive tail call to transform
//
// Notes:
//    The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop.
void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall)
{
    assert(recursiveTailCall->IsTailCallConvertibleToLoop());
    Statement* lastStmt = block->lastStmt();
    assert(recursiveTailCall == lastStmt->GetRootNode());

    // Transform recursive tail call into a loop.
Statement* earlyArgInsertionPoint = lastStmt; const DebugInfo& callDI = lastStmt->GetDebugInfo(); // Hoist arg setup statement for the 'this' argument. GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg; if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode()) { Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt); } // All arguments whose trees may involve caller parameter local variables need to be assigned to temps first; // then the temps need to be assigned to the method parameters. This is done so that the caller // parameters are not re-assigned before call arguments depending on them are evaluated. // tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of // where the next temp or parameter assignment should be inserted. // In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first // while the second call argument (const 1) doesn't. // Basic block before tail recursion elimination: // ***** BB04, stmt 1 (top level) // [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013) // [000033] --C - G------ - \--* call void RecursiveMethod // [000030] ------------ | / --* const int - 1 // [000031] ------------arg0 in rcx + --* +int // [000029] ------------ | \--* lclVar int V00 arg1 // [000032] ------------arg1 in rdx \--* const int 1 // // // Basic block after tail recursion elimination : // ***** BB04, stmt 1 (top level) // [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000030] ------------ | / --* const int - 1 // [000031] ------------ | / --* +int // [000029] ------------ | | \--* lclVar int V00 arg1 // [000050] - A---------- \--* = int // [000049] D------N---- \--* lclVar int V02 tmp0 // // ***** BB04, stmt 2 (top level) // [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000052] ------------ | / --* lclVar int V02 tmp0 // [000054] - A---------- \--* = int // [000053] D------N---- \--* lclVar int V00 arg0 // ***** BB04, stmt 3 (top level) // [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000032] ------------ | / --* const int 1 // [000057] - A---------- \--* = int // [000056] D------N---- \--* lclVar int V01 arg1 Statement* tmpAssignmentInsertionPoint = lastStmt; Statement* paramAssignmentInsertionPoint = lastStmt; // Process early args. They may contain both setup statements for late args and actual args. // Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum // below has the correct second argument. int earlyArgIndex = (thisArg == nullptr) ? 0 : 1; for (GenTreeCall::Use& use : recursiveTailCall->Args()) { GenTree* earlyArg = use.GetNode(); if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode()) { if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0) { // This is a setup node so we need to hoist it. Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt); } else { // This is an actual argument that needs to be assigned to the corresponding caller parameter. 
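    // Worked example for the parameter mapping used below (hypothetical arg table):
    // if an R2R indirection cell was added late as a non-standard arg with argNum 0,
    // then an IL argument with argNum 2 has one late-added non-standard arg below it,
    // so fgGetArgTabEntryParameterLclNum returns parameter lcl num 2 - 1 = 1.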
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } } } earlyArgIndex++; } // Process late args. int lateArgIndex = 0; for (GenTreeCall::Use& use : recursiveTailCall->LateArgs()) { // A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter. GenTree* lateArg = use.GetNode(); fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } lateArgIndex++; } // If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that // compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that // block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here. if (!info.compIsStatic && (lvaArg0Var != info.compThisArg)) { var_types thisType = lvaTable[info.compThisArg].TypeGet(); GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType); GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType)); Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt); } // If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog // but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization // for all non-parameter IL locals as well as temp structs with GC fields. // Liveness phase will remove unnecessary initializations. 
if (info.compInitMem || compSuppressedZeroInit) { unsigned varNum; LclVarDsc* varDsc; for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++) { #if FEATURE_FIXED_OUT_ARGS if (varNum == lvaOutgoingArgSpaceVar) { continue; } #endif // FEATURE_FIXED_OUT_ARGS if (!varDsc->lvIsParam) { var_types lclType = varDsc->TypeGet(); bool isUserLocal = (varNum < info.compLocalsCount); bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr()); bool hadSuppressedInit = varDsc->lvSuppressedZeroInit; if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit) { GenTree* lcl = gtNewLclvNode(varNum, lclType); GenTree* init = nullptr; if (varTypeIsStruct(lclType)) { const bool isVolatile = false; const bool isCopyBlock = false; init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock); init = fgMorphInitBlock(init); } else { GenTree* zero = gtNewZeroConNode(genActualType(lclType)); init = gtNewAssignNode(lcl, zero); } Statement* initStmt = gtNewStmt(init, callDI); fgInsertStmtBefore(block, lastStmt, initStmt); } } } } // Remove the call fgRemoveStmt(block, lastStmt); // Set the loop edge. if (opts.IsOSR()) { // Todo: this may not look like a viable loop header. // Might need the moral equivalent of a scratch BB. block->bbJumpDest = fgEntryBB; } else { // Ensure we have a scratch block and then target the next // block. Loop detection needs to see a pred out of the loop, // so mark the scratch block BBF_DONT_REMOVE to prevent empty // block removal on it. fgEnsureFirstBBisScratch(); fgFirstBB->bbFlags |= BBF_DONT_REMOVE; block->bbJumpDest = fgFirstBB->bbNext; } // Finish hooking things up. block->bbJumpKind = BBJ_ALWAYS; fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } //------------------------------------------------------------------------------ // fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter. // // // Arguments: // arg - argument to assign // argTabEntry - argument table entry corresponding to arg // lclParamNum - the lcl num of the parameter // block --- basic block the call is in // callILOffset - IL offset of the call // tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary) // paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted // // Return Value: // parameter assignment statement if one was inserted; nullptr otherwise. Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint) { // Call arguments should be assigned to temps first and then the temps should be assigned to parameters because // some argument trees may reference parameters directly. GenTree* argInTemp = nullptr; bool needToAssignParameter = true; // TODO-CQ: enable calls with struct arguments passed in registers. noway_assert(!varTypeIsStruct(arg->TypeGet())); if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl()) { // The argument is already assigned to a temp or is a const. argInTemp = arg; } else if (arg->OperGet() == GT_LCL_VAR) { unsigned lclNum = arg->AsLclVar()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (!varDsc->lvIsParam) { // The argument is a non-parameter local so it doesn't need to be assigned to a temp. 
argInTemp = arg; } else if (lclNum == lclParamNum) { // The argument is the same parameter local that we were about to assign so // we can skip the assignment. needToAssignParameter = false; } } // TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve // any caller parameters. Some common cases are handled above but we may be able to eliminate // more temp assignments. Statement* paramAssignStmt = nullptr; if (needToAssignParameter) { if (argInTemp == nullptr) { // The argument is not assigned to a temp. We need to create a new temp and insert an assignment. // TODO: we can avoid a temp assignment if we can prove that the argument tree // doesn't involve any caller parameters. unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp")); lvaTable[tmpNum].lvType = arg->gtType; GenTree* tempSrc = arg; GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType); GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc); Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI); fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt); argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType); } // Now assign the temp to the parameter. const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum); assert(paramDsc->lvIsParam); GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType); GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp); paramAssignStmt = gtNewStmt(paramAssignNode, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt); } return paramAssignStmt; } /***************************************************************************** * * Transform the given GT_CALL tree for code generation. */ GenTree* Compiler::fgMorphCall(GenTreeCall* call) { if (call->CanTailCall()) { GenTree* newNode = fgMorphPotentialTailCall(call); if (newNode != nullptr) { return newNode; } assert(!call->CanTailCall()); #if FEATURE_MULTIREG_RET if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet())) { // The tail call has been rejected so we must finish the work deferred // by impFixupCallStructReturn for multi-reg-returning calls and transform // ret call // into // temp = call // ret temp // Force re-evaluating the argInfo as the return argument has changed. call->ResetArgInfo(); // Create a new temp. unsigned tmpNum = lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call).")); lvaTable[tmpNum].lvIsMultiRegRet = true; CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd; assert(structHandle != NO_CLASS_HANDLE); const bool unsafeValueClsCheck = false; lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); GenTree* assg = gtNewAssignNode(dst, call); assg = fgMorphTree(assg); // Create the assignment statement and insert it before the current statement. Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo()); fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt); // Return the temp. 
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); result->gtFlags |= GTF_DONT_CSE; compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call #ifdef DEBUG if (verbose) { printf("\nInserting assignment of a multi-reg call result to a temp:\n"); gtDispStmt(assgStmt); } result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG return result; } #endif } if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR) #ifdef FEATURE_READYTORUN || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR) #endif ) && (call == fgMorphStmt->GetRootNode())) { // This is call to CORINFO_HELP_VIRTUAL_FUNC_PTR with ignored result. // Transform it into a null check. GenTree* thisPtr = call->gtCallArgs->GetNode(); GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB); return fgMorphTree(nullCheck); } noway_assert(call->gtOper == GT_CALL); // // Only count calls once (only in the global morph phase) // if (fgGlobalMorph) { if (call->gtCallType == CT_INDIRECT) { optCallCount++; optIndirectCallCount++; } else if (call->gtCallType == CT_USER_FUNC) { optCallCount++; if (call->IsVirtual()) { optIndirectCallCount++; } } } // Couldn't inline - remember that this BB contains method calls // Mark the block as a GC safe point for the call if possible. // In the event the call indicates the block isn't a GC safe point // and the call is unmanaged with a GC transition suppression request // then insert a GC poll. CLANG_FORMAT_COMMENT_ANCHOR; if (IsGcSafePoint(call)) { compCurBB->bbFlags |= BBF_GC_SAFE_POINT; } // Regardless of the state of the basic block with respect to GC safe point, // we will always insert a GC Poll for scenarios involving a suppressed GC // transition. Only mark the block for GC Poll insertion on the first morph. if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition()) { compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT); optMethodFlags |= OMF_NEEDS_GCPOLLS; } // Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag // // We need to do these before the arguments are morphed if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)) { // See if this is foldable GenTree* optTree = gtFoldExprCall(call); // If we optimized, morph the result if (optTree != call) { return fgMorphTree(optTree); } } compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call /* Process the "normal" argument list */ call = fgMorphArgs(call); noway_assert(call->gtOper == GT_CALL); // Should we expand this virtual method call target early here? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // We only expand the Vtable Call target once in the global morph phase if (fgGlobalMorph) { assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once call->gtControlExpr = fgExpandVirtualVtableCallTarget(call); } // We always have to morph or re-morph the control expr // call->gtControlExpr = fgMorphTree(call->gtControlExpr); // Propagate any gtFlags into the call call->gtFlags |= call->gtControlExpr->gtFlags; } // Morph stelem.ref helper call to store a null value, into a store into an array without the helper. // This needs to be done after the arguments are morphed to ensure constant propagation has already taken place. 
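    // For example (illustrative shapes, not a verbatim dump), a store of null
    //    CALL help CORINFO_HELP_ARRADDR_ST(array, index, null)
    // becomes
    //    ASG(INDEX_REF(array, index), null)
    // since storing a null reference can never fail the array covariance check.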
    if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) &&
        (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST)))
    {
        GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode();
        if (value->IsIntegralConst(0))
        {
            assert(value->OperGet() == GT_CNS_INT);

            GenTree* arr   = gtArgEntryByArgNum(call, 0)->GetNode();
            GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode();

            // Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy
            // the spill trees as well if necessary.
            GenTreeOp* argSetup = nullptr;
            for (GenTreeCall::Use& use : call->Args())
            {
                GenTree* const arg = use.GetNode();
                if (arg->OperGet() != GT_ASG)
                {
                    continue;
                }

                assert(arg != arr);
                assert(arg != index);

                arg->gtFlags &= ~GTF_LATE_ARG;

                GenTree* op1 = argSetup;
                if (op1 == nullptr)
                {
                    op1 = gtNewNothingNode();
#if DEBUG
                    op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
                }

                argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg);

#if DEBUG
                argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
            }

#ifdef DEBUG
            auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult {
                (*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
                return WALK_CONTINUE;
            };

            fgWalkTreePost(&arr, resetMorphedFlag);
            fgWalkTreePost(&index, resetMorphedFlag);
            fgWalkTreePost(&value, resetMorphedFlag);
#endif // DEBUG

            GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index);
            GenTree* const arrStore     = gtNewAssignNode(arrIndexNode, value);

            GenTree* result = fgMorphTree(arrStore);
            if (argSetup != nullptr)
            {
                result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result);
#if DEBUG
                result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
            }

            return result;
        }
    }

    if (call->IsNoReturn())
    {
        //
        // If we know that the call does not return then we can set fgRemoveRestOfBlock
        // to remove all subsequent statements and change the call's basic block to BBJ_THROW.
        // As a result the compiler won't need to preserve live registers across the call.
        //
        // This isn't needed for tail calls as there shouldn't be any code after the call anyway.
        // Besides, the tail call code is part of the epilog and converting the block to
        // BBJ_THROW would result in the tail call being dropped as the epilog is generated
        // only for BBJ_RETURN blocks.
// if (!call->IsTailCall()) { fgRemoveRestOfBlock = true; } } return call; } /***************************************************************************** * * Expand and return the call target address for a VirtualCall * The code here should match that generated by LowerVirtualVtableCall */ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) { GenTree* result; JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); noway_assert(call->gtCallType == CT_USER_FUNC); // get a reference to the thisPtr being passed fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0); GenTree* thisPtr = thisArgTabEntry->GetNode(); // fgMorphArgs must enforce this invariant by creating a temp // assert(thisPtr->OperIsLocal()); // Make a copy of the thisPtr by cloning // thisPtr = gtClone(thisPtr, true); noway_assert(thisPtr != nullptr); // Get hold of the vtable offset unsigned vtabOffsOfIndirection; unsigned vtabOffsAfterIndirection; bool isRelative; info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection, &isRelative); // Dereference the this pointer to obtain the method table, it is called vtab below GenTree* vtab; assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr); vtab->gtFlags |= GTF_IND_INVARIANT; // Get the appropriate vtable chunk if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK) { // Note this isRelative code path is currently never executed // as the VM doesn't ever return: isRelative == true // if (isRelative) { // MethodTable offset is a relative pointer. // // Additional temporary variable is used to store virtual table pointer. // Address of method is obtained by the next computations: // // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of // vtable-1st-level-indirection): // tmp = vtab // // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // When isRelative is true we need to setup two temporary variables // var1 = vtab // var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] // result = [var2] + var2 // unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab")); unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative")); GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false); tmpTree1->gtFlags |= GTF_IND_NONFAULTING; tmpTree1->gtFlags |= GTF_IND_INVARIANT; // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection GenTree* tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL)); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1); GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression> // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2] 
result->gtFlags |= GTF_IND_NONFAULTING; result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2 // Now stitch together the two assignment and the calculation of result into a single tree GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result); result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree); } else { // result = [vtab + vtabOffsOfIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } } else { result = vtab; assert(!isRelative); } if (!isRelative) { // Load the function address // result = [result + vtabOffsAfterIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL)); // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; } return result; } /***************************************************************************** * * Transform the given constant tree for code generation. */ GenTree* Compiler::fgMorphConst(GenTree* tree) { assert(tree->OperIsConst()); /* Clear any exception flags or other unnecessary flags * that may have been set before folding this node to a constant */ tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); if (!tree->OperIs(GT_CNS_STR)) { return tree; } if (tree->AsStrCon()->IsStringEmptyField()) { LPVOID pValue; InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue); return fgMorphTree(gtNewStringLiteralNode(iat, pValue)); } // TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will // guarantee slow performance for that block. Instead cache the return value // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. bool useLazyStrCns = false; if (compCurBB->bbJumpKind == BBJ_THROW) { useLazyStrCns = true; } else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall()) { // Quick check: if the root node of the current statement happens to be a noreturn call. GenTreeCall* call = compCurStmt->GetRootNode()->AsCall(); useLazyStrCns = call->IsNoReturn() || fgIsThrow(call); } if (useLazyStrCns) { CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd); if (helper != CORINFO_HELP_UNDEF) { // For un-important blocks, we want to construct the string lazily GenTreeCall::Use* args; if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE) { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT)); } else { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT), gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd)); } tree = gtNewHelperCallNode(helper, TYP_REF, args); return fgMorphTree(tree); } } assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd)); LPVOID pValue; InfoAccessType iat = info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue); tree = gtNewStringLiteralNode(iat, pValue); return fgMorphTree(tree); } //------------------------------------------------------------------------ // fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar. // // Arguments: // obj - the obj node. 
//    destroyNodes -- destroy nodes that are optimized away
//
// Return value:
//    GenTreeLclVar if the obj can be replaced by it, null otherwise.
//
// Notes:
//    TODO-CQ: currently this transformation is done only under copy block,
//    but it would be beneficial to do it for each OBJ node. However, `PUT_ARG_STACK`
//    for some platforms does not expect struct `LCL_VAR` as a source, so
//    it needs more work.
//
GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes)
{
    if (opts.OptimizationEnabled())
    {
        GenTree* op1 = obj->Addr();
        assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity");
        if (op1->OperIs(GT_ADDR))
        {
            GenTreeUnOp* addr   = op1->AsUnOp();
            GenTree*     addrOp = addr->gtGetOp1();
            if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR))
            {
                GenTreeLclVar* lclVar = addrOp->AsLclVar();

                ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout();
                ClassLayout* objLayout    = obj->GetLayout();
                if (ClassLayout::AreCompatible(lclVarLayout, objLayout))
                {
#ifdef DEBUG
                    CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle();
                    assert(objClsHandle != NO_CLASS_HANDLE);
                    if (verbose)
                    {
                        CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar);
                        printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar));
                        printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different"));
                    }
#endif
                    // Keep the DONT_CSE flag in sync
                    // (as the addr always marks it for its op1)
                    lclVar->gtFlags &= ~GTF_DONT_CSE;
                    lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE);

                    if (destroyNodes)
                    {
                        DEBUG_DESTROY_NODE(obj);
                        DEBUG_DESTROY_NODE(addr);
                    }

                    return lclVar;
                }
            }
        }
    }
    return nullptr;
}

/*****************************************************************************
 *
 *  Transform the given GTK_LEAF tree for code generation.
 */

GenTree* Compiler::fgMorphLeaf(GenTree* tree)
{
    assert(tree->OperKind() & GTK_LEAF);

    if (tree->gtOper == GT_LCL_VAR)
    {
        const bool forceRemorph = false;
        return fgMorphLocalVar(tree, forceRemorph);
    }
    else if (tree->gtOper == GT_LCL_FLD)
    {
        if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed())
        {
            tree->gtFlags |= GTF_GLOB_REF;
        }

#ifdef TARGET_X86
        if (info.compIsVarArgs)
        {
            GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(),
                                                         tree->AsLclFld()->GetLclOffs());
            if (newTree != nullptr)
            {
                if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
                {
                    newTree->SetOper(GT_IND);
                }
                return newTree;
            }
        }
#endif // TARGET_X86
    }
    else if (tree->gtOper == GT_FTN_ADDR)
    {
        GenTreeFptrVal* fptrValTree = tree->AsFptrVal();

        // A function pointer address is being used. Let the VM know if this is the
        // target of a Delegate or a raw function pointer.
        bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget;

        CORINFO_CONST_LOOKUP addrInfo;

#ifdef FEATURE_READYTORUN
        if (fptrValTree->gtEntryPoint.addr != nullptr)
        {
            addrInfo = fptrValTree->gtEntryPoint;
        }
        else
#endif
        {
            info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo);
        }

        GenTree* indNode = nullptr;
        switch (addrInfo.accessType)
        {
            case IAT_PPVALUE:
                indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true);

                // Add the second indirection
                indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode);
                // This indirection won't cause an exception.
                indNode->gtFlags |= GTF_IND_NONFAULTING;
                // This indirection also is invariant.
                indNode->gtFlags |= GTF_IND_INVARIANT;
                break;

            case IAT_PVALUE:
                indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true);
                break;

            case IAT_VALUE:
                // Refer to gtNewIconHandleNode() as the template for constructing a constant handle
                //
                tree->SetOper(GT_CNS_INT);
                tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle));
                tree->gtFlags |= GTF_ICON_FTN_ADDR;
                break;

            default:
                noway_assert(!"Unknown addrInfo.accessType");
        }

        if (indNode != nullptr)
        {
            DEBUG_DESTROY_NODE(tree);
            tree = fgMorphTree(indNode);
        }
    }

    return tree;
}

void Compiler::fgAssignSetVarDef(GenTree* tree)
{
    GenTreeLclVarCommon* lclVarCmnTree;
    bool                 isEntire = false;
    if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire))
    {
        if (isEntire)
        {
            lclVarCmnTree->gtFlags |= GTF_VAR_DEF;
        }
        else
        {
            // We consider partial definitions to be modeled as uses followed by definitions.
            // This captures the idea that preceding defs are not necessarily made redundant
            // by this definition.
            lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG);
        }
    }
}

//------------------------------------------------------------------------
// fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment
//
// Arguments:
//    tree - The block assignment to be possibly morphed
//
// Return Value:
//    The modified tree if successful, nullptr otherwise.
//
// Assumptions:
//    'tree' must be a block assignment.
//
// Notes:
//    If successful, this method always returns the incoming tree, modifying only
//    its arguments.
//
GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
{
    // This must be a block assignment.
    noway_assert(tree->OperIsBlkOp());
    var_types asgType = tree->TypeGet();

    GenTree*   asg            = tree;
    GenTree*   dest           = asg->gtGetOp1();
    GenTree*   src            = asg->gtGetOp2();
    unsigned   destVarNum     = BAD_VAR_NUM;
    LclVarDsc* destVarDsc     = nullptr;
    GenTree*   destLclVarTree = nullptr;
    bool       isCopyBlock    = asg->OperIsCopyBlkOp();
    bool       isInitBlock    = !isCopyBlock;

    unsigned             size   = 0;
    CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;

    if (dest->gtEffectiveVal()->OperIsBlk())
    {
        GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk();
        size               = lhsBlk->Size();
        if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree))
        {
            destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
            destVarDsc = lvaGetDesc(destVarNum);
        }
        if (lhsBlk->OperGet() == GT_OBJ)
        {
            clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle();
        }
    }
    else
    {
        // Is this an enregisterable struct that is already a simple assignment?
        // This can happen if we are re-morphing.
        // Note that we won't do this straightaway if this is a SIMD type, since it
        // may be a promoted lclVar (sometimes we promote the individual float fields of
        // fixed-size SIMD).
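        // As a sketch of the retyping this method performs (illustrative local V01,
        // assumed to be a long-sized struct): a block copy such as
        //    ASG struct (BLK<8> (ADDR (LCL_VAR struct V01)), src)
        // can be retyped into the scalar assignment
        //    ASG long (LCL_VAR long V01, src)
        // when the size and type checks below succeed.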
        if (dest->OperGet() == GT_IND)
        {
            noway_assert(asgType != TYP_STRUCT);
            if (varTypeIsStruct(asgType))
            {
                destLclVarTree = fgIsIndirOfAddrOfLocal(dest);
            }
            if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR))
            {
                fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/);
                dest->gtFlags |= GTF_DONT_CSE;
                return tree;
            }
        }
        else
        {
            noway_assert(dest->OperIsLocal());
            destLclVarTree = dest;
        }
        if (destLclVarTree != nullptr)
        {
            destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
            destVarDsc = lvaGetDesc(destVarNum);
            if (asgType == TYP_STRUCT)
            {
                clsHnd = destVarDsc->GetStructHnd();
                size   = destVarDsc->lvExactSize;
            }
        }
        if (asgType != TYP_STRUCT)
        {
            size = genTypeSize(asgType);
        }
    }

    if (size == 0)
    {
        return nullptr;
    }

    if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
    {
        // Let fgMorphCopyBlock handle it.
        return nullptr;
    }

    if (src->IsCall() || src->OperIsSIMD())
    {
        // Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413.
        return nullptr;
    }

    if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet()))
    {
        //
        //  See if we can do a simple transformation:
        //
        //          GT_ASG <TYP_size>
        //          /   \.
        //      GT_IND GT_IND or CNS_INT
        //         |      |
        //       [dest] [src]
        //
        if (asgType == TYP_STRUCT)
        {
            // It is possible to use `initobj` to init a primitive type on the stack,
            // like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`;
            // in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)`
            // and this code path transforms it into `ASG ref(LCL_VAR ref, 0)` because it is not a real
            // struct assignment.

            if (size == REGSIZE_BYTES)
            {
                if (clsHnd == NO_CLASS_HANDLE)
                {
                    // A register-sized cpblk can be treated as an integer assignment.
                    asgType = TYP_I_IMPL;
                }
                else
                {
                    BYTE gcPtr;
                    info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
                    asgType = getJitGCType(gcPtr);
                }
            }
            else
            {
                switch (size)
                {
                    case 1:
                        asgType = TYP_BYTE;
                        break;
                    case 2:
                        asgType = TYP_SHORT;
                        break;
#ifdef TARGET_64BIT
                    case 4:
                        asgType = TYP_INT;
                        break;
#endif // TARGET_64BIT
                }
            }
        }
    }

    GenTree*   srcLclVarTree = nullptr;
    LclVarDsc* srcVarDsc     = nullptr;
    if (isCopyBlock)
    {
        if (src->OperGet() == GT_LCL_VAR)
        {
            srcLclVarTree = src;
            srcVarDsc     = lvaGetDesc(src->AsLclVarCommon());
        }
        else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree))
        {
            srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon());
        }
        if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted)
        {
            // Let fgMorphCopyBlock handle it.
            return nullptr;
        }
    }

    if (asgType != TYP_STRUCT)
    {
        noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType));

        // For initBlk, a non-constant source is not going to allow us to fiddle
        // with the bits to create a single assignment.
        // Nor do we (for now) support transforming an InitBlock of SIMD type, unless
        // it is a direct assignment to a lclVar and the value is zero.
        if (isInitBlock)
        {
            if (!src->IsConstInitVal())
            {
                return nullptr;
            }
            if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr)))
            {
                return nullptr;
            }
        }

        if (destVarDsc != nullptr)
        {
            // Kill everything about dest
            if (optLocalAssertionProp)
            {
                if (optAssertionCount > 0)
                {
                    fgKillDependentAssertions(destVarNum DEBUGARG(tree));
                }
            }

            // A previous incarnation of this code also required the local not to be
            // address-exposed(=taken). That seems orthogonal to the decision of whether
            // to do field-wise assignments: being address-exposed will cause it to be
            // "dependently" promoted, so it will be in the right memory location.
            // One possible
            // further reason for avoiding field-wise stores is that the struct might have alignment-induced
            // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
            // concern, then we could compromise, and say that being address-exposed plus having fields that
            // do not completely cover the memory of the struct prevents field-wise assignments. The same
            // situation exists for the "src" decision.
            if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
            {
                // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
                return nullptr;
            }
            else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc)))
            {
                // Use the dest local var directly, as well as its type.
                dest    = destLclVarTree;
                asgType = destVarDsc->lvType;

                // If the block operation had been a write to a local var of a small int type,
                // of the exact size of the small int type, and the var is NormalizeOnStore,
                // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
                // have done that normalization. If we're now making it into an assignment,
                // the NormalizeOnStore will work, and it can be a full def.
                if (destVarDsc->lvNormalizeOnStore())
                {
                    dest->gtFlags &= (~GTF_VAR_USEASG);
                }
            }
            else
            {
                // Could be a non-promoted struct, or a floating point type local, or
                // an int subject to a partial write. Don't enregister.
                lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));

                // Mark the local var tree as a definition point of the local.
                destLclVarTree->gtFlags |= GTF_VAR_DEF;
                if (size < destVarDsc->lvExactSize)
                {
                    // If it's not a full-width assignment....
                    destLclVarTree->gtFlags |= GTF_VAR_USEASG;
                }

                if (dest == destLclVarTree)
                {
                    GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
                    dest          = gtNewIndir(asgType, addr);
                }
            }
        }

        // Check to ensure we don't have a reducible *(& ... )
        if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR)
        {
            // If dest is an Indir or Block, and it has a child that is an Addr node
            //
            GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR

            // Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'?
            //
            GenTree*  destOp     = addrNode->gtGetOp1();
            var_types destOpType = destOp->TypeGet();

            // We can if we have a primitive integer type and the sizes are exactly the same.
            //
            if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType))))
            {
                dest    = destOp;
                asgType = destOpType;
            }
        }

        if (dest->gtEffectiveVal()->OperIsIndir())
        {
            // If we have no information about the destination, we have to assume it could
            // live anywhere (not just in the GC heap).
            // Mark the GT_IND node so that we use the correct write barrier helper in case
            // the field is a GC ref.
            if (!fgIsIndirOfAddrOfLocal(dest))
            {
                dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
                tree->gtFlags |= GTF_GLOB_REF;
            }

            dest->SetIndirExceptionFlags(this);
            tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT);
        }

        if (isCopyBlock)
        {
            if (srcVarDsc != nullptr)
            {
                // Handled above.
                assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted);
                if (!varTypeIsFloating(srcLclVarTree->TypeGet()) &&
                    size == genTypeSize(genActualType(srcLclVarTree->TypeGet())))
                {
                    // Use the src local var directly.
                    src = srcLclVarTree;
                }
                else
                {
                    // The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar))
                    // or indir(lclVarAddr) so it must be on the stack.
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum(); lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); GenTree* srcAddr; if (src == srcLclVarTree) { srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src); src = gtNewOperNode(GT_IND, asgType, srcAddr); } else { assert(src->OperIsIndir()); } } } if (src->OperIsIndir()) { if (!fgIsIndirOfAddrOfLocal(src)) { // If we have no information about the src, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); } src->SetIndirExceptionFlags(this); } } else // InitBlk { #ifdef FEATURE_SIMD if (varTypeIsSIMD(asgType)) { assert(!isCopyBlock); // Else we would have returned the tree above. noway_assert(src->IsIntegralConst(0)); noway_assert(destVarDsc != nullptr); src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size); } else #endif { if (src->OperIsInitVal()) { src = src->gtGetOp1(); } assert(src->IsCnsIntOrI()); // This will mutate the integer constant, in place, to be the correct // value for the type we are using in the assignment. src->AsIntCon()->FixupInitBlkValue(asgType); } } // Ensure that the dest is setup appropriately. if (dest->gtEffectiveVal()->OperIsIndir()) { dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/); } // Ensure that the rhs is setup appropriately. if (isCopyBlock) { src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/); } // Set the lhs and rhs on the assignment. if (dest != tree->AsOp()->gtOp1) { asg->AsOp()->gtOp1 = dest; } if (src != asg->AsOp()->gtOp2) { asg->AsOp()->gtOp2 = src; } asg->ChangeType(asgType); dest->gtFlags |= GTF_DONT_CSE; asg->gtFlags &= ~GTF_EXCEPT; asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT); // Un-set GTF_REVERSE_OPS, and it will be set later if appropriate. asg->gtFlags &= ~GTF_REVERSE_OPS; #ifdef DEBUG if (verbose) { printf("fgMorphOneAsgBlock (after):\n"); gtDispTree(tree); } #endif return tree; } return nullptr; } //------------------------------------------------------------------------ // fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree // to a tree of promoted field initialization assignments. // // Arguments: // destLclNode - The destination LclVar node // initVal - The initialization value // blockSize - The amount of bytes to initialize // // Return Value: // A tree that performs field by field initialization of the destination // struct variable if various conditions are met, nullptr otherwise. 
//
// Notes:
//    This transforms a single block initialization assignment like:
//
//    *  ASG       struct (init)
//    +--*  BLK(12)   struct
//    |  \--*  ADDR      long
//    |     \--*  LCL_VAR   struct(P) V02 loc0
//    |     \--*    int    V02.a (offs=0x00) -> V06 tmp3
//    |     \--*    ubyte  V02.c (offs=0x04) -> V07 tmp4
//    |     \--*    float  V02.d (offs=0x08) -> V08 tmp5
//    \--*  INIT_VAL  int
//       \--*  CNS_INT   int    42
//
//    into a COMMA tree of assignments that initialize each promoted struct
//    field:
//
//    *  COMMA     void
//    +--*  COMMA     void
//    |  +--*  ASG       int
//    |  |  +--*  LCL_VAR   int    V06 tmp3
//    |  |  \--*  CNS_INT   int    0x2A2A2A2A
//    |  \--*  ASG       ubyte
//    |     +--*  LCL_VAR   ubyte  V07 tmp4
//    |     \--*  CNS_INT   int    42
//    \--*  ASG       float
//       +--*  LCL_VAR   float  V08 tmp5
//       \--*  CNS_DBL   float  1.5113661732714390e-13
//
GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize)
{
    assert(destLclNode->OperIs(GT_LCL_VAR));

    LclVarDsc* destLclVar = lvaGetDesc(destLclNode);
    assert(varTypeIsStruct(destLclVar->TypeGet()));
    assert(destLclVar->lvPromoted);

    if (blockSize == 0)
    {
        JITDUMP(" size is zero or unknown.\n");
        return nullptr;
    }

    if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles)
    {
        JITDUMP(" dest is address exposed and contains holes.\n");
        return nullptr;
    }

    if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles)
    {
        // TODO-1stClassStructs: there are no reasons for this pessimization, delete it.
        JITDUMP(" dest has custom layout and contains holes.\n");
        return nullptr;
    }

    if (destLclVar->lvExactSize != blockSize)
    {
        JITDUMP(" dest size mismatch.\n");
        return nullptr;
    }

    if (!initVal->OperIs(GT_CNS_INT))
    {
        JITDUMP(" source is not constant.\n");
        return nullptr;
    }

    const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL;

    if (initPattern != 0)
    {
        for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
        {
            LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i);

            if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet()))
            {
                // Cannot initialize GC or SIMD types with a non-zero constant.
                // The former is completely bogus. The latter restriction could be
                // lifted by supporting non-zero SIMD constants or by generating
                // field initialization code that converts an integer constant to
                // the appropriate SIMD value. Unlikely to be very useful, though.
                JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n");
                return nullptr;
            }
        }
    }

    JITDUMP(" using field by field initialization.\n");

    GenTree* tree = nullptr;

    for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
    {
        unsigned   fieldLclNum = destLclVar->lvFieldLclStart + i;
        LclVarDsc* fieldDesc   = lvaGetDesc(fieldLclNum);
        GenTree*   dest        = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet());
        // If it had been labeled a "USEASG", assignments to the individual promoted fields are not.
        dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG));

        GenTree* src;

        switch (dest->TypeGet())
        {
            case TYP_BOOL:
            case TYP_BYTE:
            case TYP_UBYTE:
            case TYP_SHORT:
            case TYP_USHORT:
                // Promoted fields are expected to be "normalize on load". If that changes then
                // we may need to adjust this code to widen the constant correctly.
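                // Worked example of the masking below: for initVal 42 the pattern is
                // (42 & 0xFF) * 0x0101010101010101 = 0x2A2A2A2A2A2A2A2A, so a TYP_SHORT
                // field gets mask (1 << 16) - 1 = 0xFFFF and constant 0x2A2A, while a
                // TYP_INT field gets 0x2A2A2A2A (matching the example tree above).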
                assert(fieldDesc->lvNormalizeOnLoad());
                FALLTHROUGH;
            case TYP_INT:
            {
                int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1;
                src          = gtNewIconNode(static_cast<int32_t>(initPattern & mask));
                break;
            }
            case TYP_LONG:
                src = gtNewLconNode(initPattern);
                break;
            case TYP_FLOAT:
                float floatPattern;
                memcpy(&floatPattern, &initPattern, sizeof(floatPattern));
                src = gtNewDconNode(floatPattern, dest->TypeGet());
                break;
            case TYP_DOUBLE:
                double doublePattern;
                memcpy(&doublePattern, &initPattern, sizeof(doublePattern));
                src = gtNewDconNode(doublePattern, dest->TypeGet());
                break;
            case TYP_REF:
            case TYP_BYREF:
#ifdef FEATURE_SIMD
            case TYP_SIMD8:
            case TYP_SIMD12:
            case TYP_SIMD16:
            case TYP_SIMD32:
#endif // FEATURE_SIMD
                assert(initPattern == 0);
                src = gtNewIconNode(0, dest->TypeGet());
                break;
            default:
                unreached();
        }

        GenTree* asg = gtNewAssignNode(dest, src);

        if (optLocalAssertionProp)
        {
            optAssertionGen(asg);
        }

        if (tree != nullptr)
        {
            tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
        }
        else
        {
            tree = asg;
        }
    }

    return tree;
}

//------------------------------------------------------------------------
// fgMorphGetStructAddr: Gets the address of a struct object
//
// Arguments:
//    pTree    - the parent's pointer to the struct object node
//    clsHnd   - the class handle for the struct type
//    isRValue - true if this is a source (not dest)
//
// Return Value:
//    Returns the address of the struct value, possibly modifying the existing tree to
//    sink the address below any comma nodes (this is to canonicalize for value numbering).
//    If this is a source, it will morph it to a GT_IND before taking its address,
//    since it may not be remorphed (and we don't want blk nodes as rvalues).

GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue)
{
    GenTree* addr;
    GenTree* tree = *pTree;
    // If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we
    // need to hang onto that for the purposes of value numbering.
    if (tree->OperIsIndir())
    {
        if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0)
        {
            addr = tree->AsOp()->gtOp1;
        }
        else
        {
            if (isRValue && tree->OperIsBlk())
            {
                tree->ChangeOper(GT_IND);
            }
            addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
        }
    }
    else if (tree->gtOper == GT_COMMA)
    {
        // If this is a comma, we're going to "sink" the GT_ADDR below it.
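        // That is, COMMA(x, y) becomes COMMA(x, ADDR(y)) (recursively), and the
        // comma itself is retyped to TYP_BYREF so that it now yields the address.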
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue); tree->gtType = TYP_BYREF; addr = tree; } else { switch (tree->gtOper) { case GT_LCL_FLD: case GT_LCL_VAR: case GT_INDEX: case GT_FIELD: case GT_ARR_ELEM: addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); break; case GT_INDEX_ADDR: addr = tree; break; default: { // TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're // not going to use "temp" GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd); unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum(); lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr)); addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue); break; } } } *pTree = addr; return addr; } //------------------------------------------------------------------------ // fgMorphBlockOperand: Canonicalize an operand of a block assignment // // Arguments: // tree - The block operand // asgType - The type of the assignment // blockWidth - The size of the block // isBlkReqd - true iff this operand must remain a block node // // Return Value: // Returns the morphed block operand // // Notes: // This does the following: // - Ensures that a struct operand is a block node or lclVar. // - Ensures that any COMMAs are above ADDR nodes. // Although 'tree' WAS an operand of a block assignment, the assignment // may have been retyped to be a scalar assignment. GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd) { GenTree* effectiveVal = tree->gtEffectiveVal(); if (asgType != TYP_STRUCT) { if (effectiveVal->OperIsIndir()) { if (!isBlkReqd) { GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType)) { effectiveVal = addr->gtGetOp1(); } else if (effectiveVal->OperIsBlk()) { effectiveVal->SetOper(GT_IND); } } effectiveVal->gtType = asgType; } else if (effectiveVal->TypeGet() != asgType) { if (effectiveVal->IsCall()) { #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } else { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); effectiveVal = gtNewIndir(asgType, addr); } } } else { GenTreeIndir* indirTree = nullptr; GenTreeLclVarCommon* lclNode = nullptr; bool needsIndirection = true; if (effectiveVal->OperIsIndir()) { indirTree = effectiveVal->AsIndir(); GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR)) { lclNode = addr->gtGetOp1()->AsLclVarCommon(); } } else if (effectiveVal->OperGet() == GT_LCL_VAR) { lclNode = effectiveVal->AsLclVarCommon(); } else if (effectiveVal->IsCall()) { needsIndirection = false; #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } #ifdef TARGET_ARM64 else if (effectiveVal->OperIsHWIntrinsic()) { needsIndirection = false; #ifdef DEBUG GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic(); assert(intrinsic->TypeGet() == TYP_STRUCT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())); #endif } #endif // TARGET_ARM64 if (lclNode != nullptr) { const LclVarDsc* varDsc = lvaGetDesc(lclNode); if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType)) { if (effectiveVal != lclNode) { JITDUMP("Replacing block node [%06d] with lclVar 
V%02u\n", dspTreeID(tree), lclNode->GetLclNum()); effectiveVal = lclNode; } needsIndirection = false; } else { // This may be a lclVar that was determined to be address-exposed. effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT); } } if (needsIndirection) { if (indirTree != nullptr) { // If we have an indirection and a block is required, it should already be a block. assert(indirTree->OperIsBlk() || !isBlkReqd); effectiveVal->gtType = asgType; } else { GenTree* newTree; GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); if (isBlkReqd) { CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal); if (clsHnd == NO_CLASS_HANDLE) { newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth)); } else { newTree = gtNewObjNode(clsHnd, addr); gtSetObjGcInfo(newTree->AsObj()); } } else { newTree = gtNewIndir(asgType, addr); } effectiveVal = newTree; } } } assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal))); tree = effectiveVal; return tree; } //------------------------------------------------------------------------ // fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields. // // Arguments: // lclNum1 - a promoted lclVar that is used in fieldwise assignment; // lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM. // // Return Value: // True if the second local is valid and has the same struct handle as the first, // false otherwise. // // Notes: // This check is needed to avoid accessing LCL_VARs with incorrect // CORINFO_FIELD_HANDLE that would confuse VN optimizations. // bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2) { assert(lclNum1 != BAD_VAR_NUM); if (lclNum2 == BAD_VAR_NUM) { return false; } const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1); const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2); assert(varTypeIsStruct(varDsc1)); if (!varTypeIsStruct(varDsc2)) { return false; } CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd(); CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd(); assert(struct1 != NO_CLASS_HANDLE); assert(struct2 != NO_CLASS_HANDLE); if (struct1 != struct2) { return false; } return true; } // insert conversions and normalize to make tree amenable to register // FP architectures GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree) { if (tree->OperIsArithmetic()) { if (varTypeIsFloating(tree)) { GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet())); if (op1->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet()); } if (op2->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet()); } } } else if (tree->OperIsCompare()) { GenTree* op1 = tree->AsOp()->gtOp1; if (varTypeIsFloating(op1)) { GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op2)); if (op1->TypeGet() != op2->TypeGet()) { // both had better be floating, just one bigger than other if (op1->TypeGet() == TYP_FLOAT) { assert(op2->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_FLOAT) { assert(op1->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } } } } return tree; } #ifdef FEATURE_SIMD 
//--------------------------------------------------------------------------------------------------------------
// getSIMDStructFromField:
//   Checks whether the field belongs to a SIMD struct. If it does, returns the GenTree* for
//   the struct node, along with the base type, field index and SIMD size. If it does not, returns nullptr.
//   Usually, if the tree node is from a SIMD lclvar that is not used in any SIMD intrinsic, we
//   should return nullptr, since in that case we should treat the SIMD struct as a regular struct.
//   However, if you want the SIMD struct node regardless, you can set ignoreUsedInSIMDIntrinsic
//   to true. Then the IsUsedInSIMDIntrinsic check is skipped, and the SIMD struct node is returned
//   whenever the struct is a SIMD struct.
//
// Arguments:
//   tree - GenTree*. This node will be checked to see if it is a field that belongs to a simd
//          struct used for a simd intrinsic or not.
//   simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut
//                        to the simd lclvar's base JIT type.
//   indexOut - unsigned pointer, if the tree is used for a simd intrinsic, we will set *indexOut
//              equal to the index number of this field.
//   simdSizeOut - unsigned pointer, if the tree is used for a simd intrinsic, set *simdSizeOut
//                 equal to the simd struct size which this tree belongs to.
//   ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
//                               the UsedInSIMDIntrinsic check.
//
// Return Value:
//   A GenTree* which points to the simd lclvar tree that the field belongs to. If the tree is not a simd
//   intrinsic related field, return nullptr.
//
GenTree* Compiler::getSIMDStructFromField(GenTree*     tree,
                                          CorInfoType* simdBaseJitTypeOut,
                                          unsigned*    indexOut,
                                          unsigned*    simdSizeOut,
                                          bool         ignoreUsedInSIMDIntrinsic /*false*/)
{
    GenTree* ret = nullptr;
    if (tree->OperGet() == GT_FIELD)
    {
        GenTree* objRef = tree->AsField()->GetFldObj();
        if (objRef != nullptr)
        {
            GenTree* obj = nullptr;
            if (objRef->gtOper == GT_ADDR)
            {
                obj = objRef->AsOp()->gtOp1;
            }
            else if (ignoreUsedInSIMDIntrinsic)
            {
                obj = objRef;
            }
            else
            {
                return nullptr;
            }

            if (isSIMDTypeLocal(obj))
            {
                LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon());
                if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
                {
                    *simdSizeOut        = varDsc->lvExactSize;
                    *simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
                    ret                 = obj;
                }
            }
            else if (obj->OperGet() == GT_SIMD)
            {
                ret                   = obj;
                GenTreeSIMD* simdNode = obj->AsSIMD();
                *simdSizeOut          = simdNode->GetSimdSize();
                *simdBaseJitTypeOut   = simdNode->GetSimdBaseJitType();
            }
#ifdef FEATURE_HW_INTRINSICS
            else if (obj->OperIsHWIntrinsic())
            {
                ret                          = obj;
                GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
                *simdSizeOut                 = simdNode->GetSimdSize();
                *simdBaseJitTypeOut          = simdNode->GetSimdBaseJitType();
            }
#endif // FEATURE_HW_INTRINSICS
        }
    }
    if (ret != nullptr)
    {
        var_types fieldType = tree->TypeGet();
        if (fieldType == TYP_LONG)
        {
            // Vector2/3/4 expose public float fields while Vector<T>
            // and Vector64/128/256<T> have internal ulong fields. So
            // we should only ever encounter accesses for TYP_FLOAT or
            // TYP_LONG and in the case of the latter we don't want the
            // generic type since we are executing some algorithm on the
            // raw underlying bits instead.
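            // e.g. the internal ulong fields of Vector128<T> are reported with a
            // ulong base type here regardless of the vector's element type T.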
            *simdBaseJitTypeOut = CORINFO_TYPE_ULONG;
        }
        else
        {
            assert(fieldType == TYP_FLOAT);
        }

        unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut));
        *indexOut             = tree->AsField()->gtFldOffset / baseTypeSize;
    }
    return ret;
}

/*****************************************************************************
 *  If a read operation tries to access a simd struct field, then transform the operation
 *  to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree.
 *  Argument:
 *    tree - GenTree*. If this pointer points to a simd struct which is used for a simd
 *           intrinsic, we will morph it into the simd intrinsic NI_Vector128_GetElement.
 *  Return:
 *    A GenTree* which points to the new tree. If the tree is not for a simd intrinsic,
 *    return nullptr.
 */
GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree)
{
    unsigned    index           = 0;
    CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
    unsigned    simdSize        = 0;
    GenTree*    simdStructNode  = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);

    if (simdStructNode != nullptr)
    {
        var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
        GenTree*  op2          = gtNewIconNode(index, TYP_INT);

        assert(simdSize <= 16);
        assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));

        tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize,
                                       /* isSimdAsHWIntrinsic */ true);
    }
    return tree;
}

/*****************************************************************************
 *  Transform an assignment of a SIMD struct field to SimdWithElementNode, and
 *  return a new tree. If it is not such an assignment, then return the old tree.
 *  Argument:
 *    tree - GenTree*. If this pointer points to a simd struct which is used for a simd
 *           intrinsic, we will morph it into a simd intrinsic set-element.
 *  Return:
 *    A GenTree* which points to the new tree. If the tree is not for a simd intrinsic,
 *    return nullptr.
 */
GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree)
{
    assert(tree->OperGet() == GT_ASG);

    unsigned    index           = 0;
    CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
    unsigned    simdSize        = 0;
    GenTree*    simdStructNode  = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize);

    if (simdStructNode != nullptr)
    {
        var_types simdType     = simdStructNode->gtType;
        var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);

        assert(simdSize <= 16);
        assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));

        GenTree*       op2         = gtNewIconNode(index, TYP_INT);
        GenTree*       op3         = tree->gtGetOp2();
        NamedIntrinsic intrinsicId = NI_Vector128_WithElement;

        GenTree* target = gtClone(simdStructNode);
        assert(target != nullptr);

        GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize,
                                                     /* isSimdAsHWIntrinsic */ true);

        tree->AsOp()->gtOp1 = target;
        tree->AsOp()->gtOp2 = simdTree;

        // fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source
        // and target have not yet been morphed.
        // Therefore, in case the source and/or target are now implicit byrefs, we need to call it again.
        if (fgMorphImplicitByRefArgs(tree))
        {
            if (tree->gtGetOp1()->OperIsBlk())
            {
                assert(tree->gtGetOp1()->TypeGet() == simdType);

                tree->gtGetOp1()->SetOper(GT_IND);
                tree->gtGetOp1()->gtType = simdType;
            }
        }

#ifdef DEBUG
        tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
    }

    return tree;
}
#endif // FEATURE_SIMD

//------------------------------------------------------------------------------
// fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3"
//                      for commutative operators.
// // Arguments: // tree - node to fold // // return value: // A folded GenTree* instance or nullptr if something prevents folding. // GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree) { assert(varTypeIsIntegralOrI(tree->TypeGet())); assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR)); // op1 can be GT_COMMA, in this case we're going to fold // "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))" GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true); genTreeOps oper = tree->OperGet(); if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() || op1->gtGetOp1()->IsCnsIntOrI()) { return nullptr; } if (!fgGlobalMorph && (op1 != tree->gtGetOp1())) { // Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1))) // don't run the optimization for such trees outside of global morph. // Otherwise, there is a chance of violating VNs invariants and/or modifying a tree // that is an active CSE candidate. return nullptr; } if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1)) { // The optimization removes 'tree' from IR and changes the value of 'op1'. return nullptr; } if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow())) { return nullptr; } GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon(); GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon(); if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet())) { return nullptr; } if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2)) { // The optimization removes 'cns2' from IR and changes the value of 'cns1'. return nullptr; } GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2)); if (!folded->IsCnsIntOrI()) { // Give up if we can't fold "C1 op C2" return nullptr; } auto foldedCns = folded->AsIntCon(); cns1->SetIconValue(foldedCns->IconValue()); cns1->SetVNsFromNode(foldedCns); cns1->gtFieldSeq = foldedCns->gtFieldSeq; op1 = tree->gtGetOp1(); op1->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(cns2); DEBUG_DESTROY_NODE(foldedCns); INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1->AsOp(); } //------------------------------------------------------------------------------ // fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)". // // Arguments: // tree - node to fold // // Return Value: // A folded GenTree* instance, or nullptr if it couldn't be folded GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree) { // This transform does not preserve VNs and deletes a node. assert(fgGlobalMorph); assert(varTypeIsIntegralOrI(tree)); assert(tree->OperIs(GT_OR, GT_AND, GT_XOR)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); // see whether both ops are casts, with matching to and from types. if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST)) { // bail if either operand is a checked cast if (op1->gtOverflow() || op2->gtOverflow()) { return nullptr; } var_types fromType = op1->AsCast()->CastOp()->TypeGet(); var_types toType = op1->AsCast()->CastToType(); bool isUnsigned = op1->IsUnsigned(); if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) || (op2->IsUnsigned() != isUnsigned)) { return nullptr; } /* // Reuse gentree nodes: // // tree op1 // / \ | // op1 op2 ==> tree // | | / \. 
        //    x     y         x     y
        //
        // (op2 becomes garbage)
        */

        tree->gtOp1  = op1->AsCast()->CastOp();
        tree->gtOp2  = op2->AsCast()->CastOp();
        tree->gtType = genActualType(fromType);

        op1->gtType                 = genActualType(toType);
        op1->AsCast()->gtOp1        = tree;
        op1->AsCast()->CastToType() = toType;
        op1->SetAllEffectsFlags(tree);
        // no need to update isUnsigned

        DEBUG_DESTROY_NODE(op2);
        INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);

        return op1;
    }

    return nullptr;
}

/*****************************************************************************
 *
 *  Transform the given GTK_SMPOP tree for code generation.
 */

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
{
    ALLOCA_CHECK();
    assert(tree->OperKind() & GTK_SMPOP);

    /* The steps in this function are :
       o Perform required preorder processing
       o Process the first, then second operand, if any
       o Perform required postorder morphing
       o Perform optional postorder morphing if optimizing
     */

    bool isQmarkColon = false;

    AssertionIndex origAssertionCount = DUMMY_INIT(0);
    AssertionDsc*  origAssertionTab   = DUMMY_INIT(NULL);

    AssertionIndex thenAssertionCount = DUMMY_INIT(0);
    AssertionDsc*  thenAssertionTab   = DUMMY_INIT(NULL);

    if (fgGlobalMorph)
    {
        tree = fgMorphForRegisterFP(tree);
    }

    genTreeOps oper = tree->OperGet();
    var_types  typ  = tree->TypeGet();
    GenTree*   op1  = tree->AsOp()->gtOp1;
    GenTree*   op2  = tree->gtGetOp2IfPresent();

    /*-------------------------------------------------------------------------
     * First do any PRE-ORDER processing
     */

    switch (oper)
    {
        // Some arithmetic operators need to use a helper call to the EE
        int helper;

        case GT_ASG:
            tree = fgDoNormalizeOnStore(tree);
            /* fgDoNormalizeOnStore can change op2 */
            noway_assert(op1 == tree->AsOp()->gtOp1);
            op2 = tree->AsOp()->gtOp2;

#ifdef FEATURE_SIMD
            if (IsBaselineSimdIsaSupported())
            {
                // We should check whether op2 should be assigned to a SIMD field or not.
                // If it is, we should translate the tree to a simd intrinsic.
                assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
                GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree);
                typ              = tree->TypeGet();
                op1              = tree->gtGetOp1();
                op2              = tree->gtGetOp2();
#ifdef DEBUG
                assert((tree == newTree) && (tree->OperGet() == oper));
                if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
                {
                    tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
                }
#endif // DEBUG
            }
#endif

            // We can't CSE the LHS of an assignment. Only r-values can be CSEed.
            // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
            // behavior, allow CSE'ing if it is a struct type (or a TYP_REF transformed from a struct type)
            // TODO-1stClassStructs: improve this.
            if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
            {
                op1->gtFlags |= GTF_DONT_CSE;
            }
            break;

        case GT_ADDR:
            /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
            op1->gtFlags |= GTF_DONT_CSE;
            break;

        case GT_QMARK:
        case GT_JTRUE:

            noway_assert(op1);

            if (op1->OperIsCompare())
            {
                /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
                   not need to materialize the result as a 0 or 1. */

                /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
                op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);

                // Request that the codegen for op1 sets the condition flags
                // when it generates the code for op1.
                //
                // Codegen for op1 must set the condition flags if
                // this method returns true.
                // op1->gtRequestSetFlags();
            }
            else
            {
                GenTree* effOp1 = op1->gtEffectiveVal();
                noway_assert((effOp1->gtOper == GT_CNS_INT) &&
                             (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
            }
            break;

        case GT_COLON:
            if (optLocalAssertionProp)
            {
                isQmarkColon = true;
            }
            break;

        case GT_FIELD:
            return fgMorphField(tree, mac);

        case GT_INDEX:
            return fgMorphArrayIndex(tree);

        case GT_CAST:
        {
            GenTree* morphedCast = fgMorphExpandCast(tree->AsCast());
            if (morphedCast != nullptr)
            {
                return morphedCast;
            }

            op1 = tree->AsCast()->CastOp();
        }
        break;

        case GT_MUL:
            noway_assert(op2 != nullptr);

            if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow())
            {
                // MUL(NEG(a), C) => MUL(a, NEG(C))
                if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() &&
                    !op2->IsIconHandle())
                {
                    GenTree* newOp1   = op1->gtGetOp1();
                    GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet());
                    DEBUG_DESTROY_NODE(op1);
                    DEBUG_DESTROY_NODE(op2);
                    tree->AsOp()->gtOp1 = newOp1;
                    tree->AsOp()->gtOp2 = newConst;
                    return fgMorphSmpOp(tree, mac);
                }
            }

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                // For (long)int1 * (long)int2, we don't actually do the
                // casts, and just multiply the 32 bit values, which will
                // give us the 64 bit result in edx:eax.

                if (tree->Is64RsltMul())
                {
                    // We are seeing this node again.
                    // Morph only the children of casts,
                    // so as to avoid losing them.
                    tree = fgMorphLongMul(tree->AsOp());

                    goto DONE_MORPHING_CHILDREN;
                }

                tree = fgRecognizeAndMorphLongMul(tree->AsOp());
                op1  = tree->AsOp()->gtGetOp1();
                op2  = tree->AsOp()->gtGetOp2();

                if (tree->Is64RsltMul())
                {
                    goto DONE_MORPHING_CHILDREN;
                }
                else
                {
                    if (tree->gtOverflow())
                        helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
                    else
                        helper = CORINFO_HELP_LMUL;

                    goto USE_HELPER_FOR_ARITH;
                }
            }
#endif // !TARGET_64BIT
            break;

        case GT_ARR_LENGTH:
            if (op1->OperIs(GT_CNS_STR))
            {
                // Optimize `ldstr + String::get_Length()` to CNS_INT
                // e.g. "Hello".Length => 5
                GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon());
                if (iconNode != nullptr)
                {
                    INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
                    return iconNode;
                }
            }
            break;

        case GT_DIV:
            // Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two.
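            // e.g. "x / 2.0" becomes "x * 0.5".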
            // Powers of two within range are always exactly represented,
            // so multiplication by the reciprocal is safe in this scenario
            if (fgGlobalMorph && op2->IsCnsFltOrDbl())
            {
                double divisor = op2->AsDblCon()->gtDconVal;
                if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
                    ((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
                {
                    oper = GT_MUL;
                    tree->ChangeOper(oper);
                    op2->AsDblCon()->gtDconVal = 1.0 / divisor;
                }
            }

            // Convert DIV to UDIV if both op1 and op2 are known to be never negative
            if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                op2->IsNeverNegative(this))
            {
                assert(tree->OperIs(GT_DIV));
                tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
                return fgMorphSmpOp(tree, mac);
            }

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = CORINFO_HELP_LDIV;
                goto USE_HELPER_FOR_ARITH;
            }

#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                helper = CORINFO_HELP_DIV;
                goto USE_HELPER_FOR_ARITH;
            }
#endif
#endif // !TARGET_64BIT
            break;

        case GT_UDIV:

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = CORINFO_HELP_ULDIV;
                goto USE_HELPER_FOR_ARITH;
            }

#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                helper = CORINFO_HELP_UDIV;
                goto USE_HELPER_FOR_ARITH;
            }
#endif
#endif // TARGET_64BIT
            break;

        case GT_MOD:

            if (varTypeIsFloating(typ))
            {
                helper = CORINFO_HELP_DBLREM;
                noway_assert(op2);
                if (op1->TypeGet() == TYP_FLOAT)
                {
                    if (op2->TypeGet() == TYP_FLOAT)
                    {
                        helper = CORINFO_HELP_FLTREM;
                    }
                    else
                    {
                        tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
                    }
                }
                else if (op2->TypeGet() == TYP_FLOAT)
                {
                    tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
                }
                goto USE_HELPER_FOR_ARITH;
            }

            // Convert MOD to UMOD if both op1 and op2 are known to be never negative
            if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                op2->IsNeverNegative(this))
            {
                assert(tree->OperIs(GT_MOD));
                tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
                return fgMorphSmpOp(tree, mac);
            }

            // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
            // A similar optimization for signed mod will not work for a negative perfectly divisible
            // HI-word. To make it correct, we would need to divide without the sign and then flip the
            // result sign after mod. This requires 18 opcodes + flow making it not worthy to inline.
            goto ASSIGN_HELPER_FOR_MOD;

        case GT_UMOD:

#ifdef TARGET_ARMARCH
            //
            // Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
            //
#else  // TARGET_XARCH
            // If this is an unsigned long mod with a constant divisor,
            // then don't morph to a helper call - it can be done faster inline using idiv.

            noway_assert(op2);
            if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
            {
                if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
                    op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
                {
                    tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
                    noway_assert(op1->TypeIs(TYP_LONG));

                    // Update flags for op1 morph.
                    tree->gtFlags &= ~GTF_ALL_EFFECT;

                    // Only update with op1 as op2 is a constant.
                    tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);

                    // If op1 is a constant, then do constant folding of the division operator.
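                    // e.g. an unsigned "12 % 10" with both operands constant folds to 2 here.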
                    if (op1->OperIs(GT_CNS_NATIVELONG))
                    {
                        tree = gtFoldExpr(tree);
                    }

                    if (!tree->OperIsConst())
                    {
                        tree->AsOp()->CheckDivideByConstOptimized(this);
                    }

                    return tree;
                }
            }
#endif // TARGET_XARCH

        ASSIGN_HELPER_FOR_MOD:

            // For "val % 1", return 0 if op1 doesn't have any side effects.
            // When we are in the CSE phase, we cannot discard 'tree'
            // because it may contain CSE expressions that we haven't yet examined.
            //
            if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
            {
                if (op2->IsIntegralConst(1))
                {
                    GenTree* zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
                    zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
                    DEBUG_DESTROY_NODE(tree);
                    return zeroNode;
                }
            }

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
                goto USE_HELPER_FOR_ARITH;
            }

#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                if (oper == GT_UMOD)
                {
                    helper = CORINFO_HELP_UMOD;
                    goto USE_HELPER_FOR_ARITH;
                }
                else if (oper == GT_MOD)
                {
                    helper = CORINFO_HELP_MOD;
                    goto USE_HELPER_FOR_ARITH;
                }
            }
#endif
#endif // !TARGET_64BIT

#ifdef TARGET_ARM64
            // For ARM64 we don't have a remainder instruction,
            // The architecture manual suggests the following transformation to
            // generate code for such operator:
            //
            //    a % b = a - (a / b) * b;
            //
            // TODO: there are special cases where it can be done better, for example
            // when the modulo operation is unsigned and the divisor is an
            // integer constant power of two.  In this case, we can make the transform:
            //
            //    a % b = a & (b - 1);
            //
            // Lower supports it for all cases except when `a` is constant, but
            // in Morph we can't guarantee that `a` won't be transformed into a constant,
            // so can't guarantee that lower will be able to do this optimization.
            {
                // Do "a % b = a - (a / b) * b" morph always, see TODO before this block.

                bool doMorphModToSubMulDiv = true;

                if (doMorphModToSubMulDiv)
                {
                    assert(!optValnumCSE_phase);

                    tree = fgMorphModToSubMulDiv(tree->AsOp());
                    op1  = tree->AsOp()->gtOp1;
                    op2  = tree->AsOp()->gtOp2;
                }
            }
#else  // !TARGET_ARM64
            // If b is not a power of 2 constant then lowering replaces a % b
            // with a - (a / b) * b and applies magic division optimization to
            // a / b. The code may already contain an a / b expression (e.g.
            // x = a / 10; y = a % 10;) and then we end up with redundant code.
            // If we convert % to / here we give CSE the opportunity to eliminate
            // the redundant division. If there's no redundant division then
            // nothing is lost, lowering would have done this transform anyway.

            if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst()))
            {
                ssize_t divisorValue    = op2->AsIntCon()->IconValue();
                size_t  absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue)
                                                                        : static_cast<size_t>(abs(divisorValue));

                if (!isPow2(absDivisorValue))
                {
                    tree = fgMorphModToSubMulDiv(tree->AsOp());
                    op1  = tree->AsOp()->gtOp1;
                    op2  = tree->AsOp()->gtOp2;
                }
            }
#endif // !TARGET_ARM64
            break;

        USE_HELPER_FOR_ARITH:
        {
            // TODO: this comment is wrong now, do an appropriate fix.
            /* We have to morph these arithmetic operations into helper calls
               before morphing the arguments (preorder), else the arguments
               won't get correct values of fgPtrArgCntCur.
               However, try to fold the tree first in case we end up with a
               simple node which won't need a helper call at all */

            noway_assert(tree->OperIsBinary());

            GenTree* oldTree = tree;

            tree = gtFoldExpr(tree);

            // Were we able to fold it?
            // Note that gtFoldExpr may return a non-leaf even if successful
            // e.g. for something like "expr / 1" - see also bug #290853
            if (tree->OperIsLeaf() || (oldTree != tree))
            {
                return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
            }

            // Did we fold it into a comma node with throw?
            if (tree->gtOper == GT_COMMA)
            {
                noway_assert(fgIsCommaThrow(tree));
                return fgMorphTree(tree);
            }
        }
            return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2));

        case GT_RETURN:
            if (!tree->TypeIs(TYP_VOID))
            {
                if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND))
                {
                    op1 = fgMorphRetInd(tree->AsUnOp());
                }
                if (op1->OperIs(GT_LCL_VAR))
                {
                    // With a `genReturnBB` this `RETURN(src)` tree will be replaced by an `ASG(genReturnLocal, src)`
                    // and the `ASG` will be transformed into a field by field copy without parent local referencing
                    // if possible.
                    GenTreeLclVar* lclVar = op1->AsLclVar();
                    unsigned       lclNum = lclVar->GetLclNum();
                    if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum))
                    {
                        LclVarDsc* varDsc = lvaGetDesc(lclVar);
                        if (varDsc->CanBeReplacedWithItsField(this))
                        {
                            // We can replace the struct with its only field and allow copy propagation to replace
                            // return value that was written as a field.
                            unsigned   fieldLclNum = varDsc->lvFieldLclStart;
                            LclVarDsc* fieldDsc    = lvaGetDesc(fieldLclNum);

                            JITDUMP("Replacing an independently promoted local var V%02u with its only field "
                                    "V%02u for "
                                    "the return [%06u]\n",
                                    lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree));
                            lclVar->SetLclNum(fieldLclNum);
                            lclVar->ChangeType(fieldDsc->lvType);
                        }
                    }
                }
            }

            // normalize small integer return values
            if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) &&
                fgCastNeeded(op1, info.compRetType))
            {
                // Small-typed return values are normalized by the callee
                op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType);

                // Propagate GTF_COLON_COND
                op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);

                tree->AsOp()->gtOp1 = fgMorphTree(op1);

                // Propagate side effect flags
                tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1());

                return tree;
            }
            break;

        case GT_EQ:
        case GT_NE:
        {
            GenTree* optimizedTree = gtFoldTypeCompare(tree);

            if (optimizedTree != tree)
            {
                return fgMorphTree(optimizedTree);
            }

            // Pattern-matching optimization:
            //    (a % c) ==/!= 0
            // for power-of-2 constant `c`
            // =>
            //    a & (c - 1) ==/!= 0
            // For integer `a`, even if negative.
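            // e.g. "(x % 8) == 0" becomes "(x & 7) == 0", which is also correct for negative x.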
if (opts.OptimizationEnabled() && !optValnumCSE_phase) { assert(tree->OperIs(GT_EQ, GT_NE)); if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0)) { GenTree* op1op2 = op1->AsOp()->gtOp2; if (op1op2->IsCnsIntOrI()) { const ssize_t modValue = op1op2->AsIntCon()->IconValue(); if (isPow2(modValue)) { JITDUMP("\nTransforming:\n"); DISPTREE(tree); op1->SetOper(GT_AND); // Change % => & op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1 fgUpdateConstTreeValueNumber(op1op2); JITDUMP("\ninto:\n"); DISPTREE(tree); } } } } } FALLTHROUGH; case GT_GT: { // Try and optimize nullable boxes feeding compares GenTree* optimizedTree = gtFoldBoxNullable(tree); if (optimizedTree->OperGet() != tree->OperGet()) { return optimizedTree; } else { tree = optimizedTree; } op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); break; } case GT_RUNTIMELOOKUP: return fgMorphTree(op1); #ifdef TARGET_ARM case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round) { switch (tree->TypeGet()) { case TYP_DOUBLE: return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1)); case TYP_FLOAT: return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1)); default: unreached(); } } break; #endif case GT_PUTARG_TYPE: return fgMorphTree(tree->AsUnOp()->gtGetOp1()); case GT_NULLCHECK: { op1 = tree->AsUnOp()->gtGetOp1(); if (op1->IsCall()) { GenTreeCall* const call = op1->AsCall(); if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd))) { JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call)); // TODO: Can we also remove the call? // return fgMorphTree(call); } } } break; default: break; } if (opts.OptimizationEnabled() && fgGlobalMorph) { GenTree* morphed = fgMorphReduceAddOps(tree); if (morphed != tree) return fgMorphTree(morphed); } /*------------------------------------------------------------------------- * Process the first operand, if any */ if (op1) { // If we are entering the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can restore this state when entering the "else" part if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); origAssertionTab = (AssertionDsc*)ALLOCA(tabSize); origAssertionCount = optAssertionCount; memcpy(origAssertionTab, optAssertionTabPrivate, tabSize); } else { origAssertionCount = 0; origAssertionTab = nullptr; } } // We might need a new MorphAddressContext context. (These are used to convey // parent context about how addresses being calculated will be used; see the // specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. MorphAddrContext subIndMac1(MACK_Ind); MorphAddrContext* subMac1 = mac; if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind) { switch (tree->gtOper) { case GT_ADDR: // A non-null mac here implies this node is part of an address computation. // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; subMac1->m_kind = MACK_Addr; } break; case GT_COMMA: // In a comma, the incoming context only applies to the rightmost arg of the // comma list. The left arg (op1) gets a fresh context. 
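                    // e.g. for IND(COMMA(sideEffect, addr)), only "addr" participates
                    // in the enclosing address computation.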
                    subMac1 = nullptr;
                    break;
                case GT_OBJ:
                case GT_BLK:
                case GT_IND:
                    // A non-null mac here implies this node is part of an address computation (the tree parent is
                    // GT_ADDR).
                    // If so, we need to pass the existing mac down to the child node.
                    //
                    // Otherwise, use a new mac.
                    if (subMac1 == nullptr)
                    {
                        subMac1 = &subIndMac1;
                    }
                    break;
                default:
                    break;
            }
        }

        // For additions, if we're in an IND context keep track of whether
        // all offsets added to the address are constant, and their sum.
        if (tree->gtOper == GT_ADD && subMac1 != nullptr)
        {
            assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
            GenTree* otherOp = tree->AsOp()->gtOp2;
            // Is the other operator a constant?
            if (otherOp->IsCnsIntOrI())
            {
                ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
                totalOffset += otherOp->AsIntConCommon()->IconValue();
                if (totalOffset.IsOverflow())
                {
                    // We will consider an offset so large as to overflow as "not a constant" --
                    // we will do a null check.
                    subMac1->m_allConstantOffsets = false;
                }
                else
                {
                    subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
                }
            }
            else
            {
                subMac1->m_allConstantOffsets = false;
            }
        }

        // If op1 is a GT_FIELD or indir, we need to pass down the mac if
        // its parent is GT_ADDR, since the address of op1
        // is part of an ongoing address computation. Otherwise
        // op1 represents the value of the field and so any address
        // calculations it does are in a new context.
        if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR))
        {
            subMac1 = nullptr;

            // The impact of op1's value to any ongoing
            // address computation is handled below when looking
            // at op2.
        }

        tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1);

        // If we are exiting the "then" part of a Qmark-Colon we must
        // save the state of the current copy assignment table
        // so that we can merge this state with the "else" part exit
        if (isQmarkColon)
        {
            noway_assert(optLocalAssertionProp);
            if (optAssertionCount)
            {
                noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
                unsigned tabSize   = optAssertionCount * sizeof(AssertionDsc);
                thenAssertionTab   = (AssertionDsc*)ALLOCA(tabSize);
                thenAssertionCount = optAssertionCount;
                memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
            }
            else
            {
                thenAssertionCount = 0;
                thenAssertionTab   = nullptr;
            }
        }

        /* Morphing along with folding and inlining may have changed the
         * side effect flags, so we have to reset them
         *
         * NOTE: Don't reset the exception flags on nodes that may throw */

        assert(tree->gtOper != GT_CALL);

        if (!tree->OperRequiresCallFlag(this))
        {
            tree->gtFlags &= ~GTF_CALL;
        }

        /* Propagate the new flags */
        tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);

        // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does.
        // Similarly for clsVar
        if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
        {
            tree->gtFlags &= ~GTF_GLOB_REF;
        }
    } // if (op1)

    /*-------------------------------------------------------------------------
     * Process the second operand, if any
     */

    if (op2)
    {
        // If we are entering the "else" part of a Qmark-Colon we must
        // reset the state of the current copy assignment table
        if (isQmarkColon)
        {
            noway_assert(optLocalAssertionProp);
            optAssertionReset(0);
            if (origAssertionCount)
            {
                size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
                memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
                optAssertionReset(origAssertionCount);
            }
        }

        // We might need a new MorphAddressContext context to use in evaluating op2.
// (These are used to convey parent context about how addresses being calculated // will be used; see the specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. switch (tree->gtOper) { case GT_ADD: if (mac != nullptr && mac->m_kind == MACK_Ind) { GenTree* otherOp = tree->AsOp()->gtOp1; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } else { mac->m_allConstantOffsets = false; } } break; default: break; } // If op2 is a GT_FIELD or indir, we must be taking its value, // so it should evaluate its address in a new context. if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir()) { // The impact of op2's value to any ongoing // address computation is handled above when looking // at op1. mac = nullptr; } tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac); /* Propagate the side effect flags from op2 */ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT); // If we are exiting the "else" part of a Qmark-Colon we must // merge the state of the current copy assignment table with // that of the exit of the "then" part. if (isQmarkColon) { noway_assert(optLocalAssertionProp); // If either exit table has zero entries then // the merged table also has zero entries if (optAssertionCount == 0 || thenAssertionCount == 0) { optAssertionReset(0); } else { size_t tabSize = optAssertionCount * sizeof(AssertionDsc); if ((optAssertionCount != thenAssertionCount) || (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0)) { // Yes they are different so we have to find the merged set // Iterate over the copy asgn table removing any entries // that do not have an exact match in the thenAssertionTab AssertionIndex index = 1; while (index <= optAssertionCount) { AssertionDsc* curAssertion = optGetAssertion(index); for (unsigned j = 0; j < thenAssertionCount; j++) { AssertionDsc* thenAssertion = &thenAssertionTab[j]; // Do the left sides match? if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) && (curAssertion->assertionKind == thenAssertion->assertionKind)) { // Do the right sides match? 
                            if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
                                (curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
                            {
                                goto KEEP;
                            }
                            else
                            {
                                goto REMOVE;
                            }
                        }
                    }
                    //
                    // If we fall out of the loop above then we didn't find
                    // any matching entry in the thenAssertionTab so it must
                    // have been killed on that path so we remove it here
                    //
                REMOVE:
                    // The data at optAssertionTabPrivate[i] is to be removed
                    CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
                    if (verbose)
                    {
                        printf("The QMARK-COLON ");
                        printTreeID(tree);
                        printf(" removes assertion candidate #%d\n", index);
                    }
#endif
                    optAssertionRemove(index);
                    continue;
                KEEP:
                    // The data at optAssertionTabPrivate[i] is to be kept
                    index++;
                }
            }
        }
    } // if (op2)

#ifndef TARGET_64BIT
DONE_MORPHING_CHILDREN:
#endif // !TARGET_64BIT

    if (tree->OperIsIndirOrArrLength())
    {
        tree->SetIndirExceptionFlags(this);
    }
    else
    {
        if (tree->OperMayThrow(this))
        {
            // Mark the tree node as potentially throwing an exception
            tree->gtFlags |= GTF_EXCEPT;
        }
        else
        {
            if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
                ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
            {
                tree->gtFlags &= ~GTF_EXCEPT;
            }
        }
    }

    if (tree->OperRequiresAsgFlag())
    {
        tree->gtFlags |= GTF_ASG;
    }
    else
    {
        if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
            ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
        {
            tree->gtFlags &= ~GTF_ASG;
        }
    }

    if (tree->OperRequiresCallFlag(this))
    {
        tree->gtFlags |= GTF_CALL;
    }
    else
    {
        if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) &&
            ((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0)))
        {
            tree->gtFlags &= ~GTF_CALL;
        }
    }

    /*-------------------------------------------------------------------------
     * Now do POST-ORDER processing
     */

    if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet())))
    {
        // The tree is really not GC but was marked as such. Now that the
        // children have been unmarked, unmark the tree too.

        // Remember that GT_COMMA inherits its type only from op2
        if (tree->gtOper == GT_COMMA)
        {
            tree->gtType = genActualType(op2->TypeGet());
        }
        else
        {
            tree->gtType = genActualType(op1->TypeGet());
        }
    }

    GenTree* oldTree = tree;

    GenTree* qmarkOp1 = nullptr;
    GenTree* qmarkOp2 = nullptr;

    if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON))
    {
        qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1;
        qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2;
    }

    // Try to fold it, maybe we get lucky.
    tree = gtFoldExpr(tree);

    if (oldTree != tree)
    {
        /* if gtFoldExpr returned op1 or op2 then we are done */
        if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
        {
            return tree;
        }

        /* If we created a comma-throw tree then we need to morph op1 */
        if (fgIsCommaThrow(tree))
        {
            tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1);
            fgMorphTreeDone(tree);
            return tree;
        }

        return tree;
    }
    else if (tree->OperIsConst())
    {
        return tree;
    }

    /* gtFoldExpr could have used setOper to change the oper */
    oper = tree->OperGet();
    typ  = tree->TypeGet();

    /* gtFoldExpr could have changed op1 and op2 */
    op1 = tree->AsOp()->gtOp1;
    op2 = tree->gtGetOp2IfPresent();

    // Do we have an integer compare operation?
    //
    if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
    {
        // Are we comparing against zero?
        //
        if (op2->IsIntegralConst(0))
        {
            // Request that the codegen for op1 sets the condition flags
            // when it generates the code for op1.
            //
            // Codegen for op1 must set the condition flags if
            // this method returns true.
            // op1->gtRequestSetFlags();
        }
    }

    /*-------------------------------------------------------------------------
     * Perform the required oper-specific postorder morphing
     */

    GenTree*      temp;
    size_t        ival1;
    GenTree*      lclVarTree;
    GenTree*      effectiveOp1;
    FieldSeqNode* fieldSeq = nullptr;

    switch (oper)
    {
        case GT_ASG:

            if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0))
            {
                op1->gtFlags &= ~GTF_VAR_FOLDED_IND;
                tree = fgDoNormalizeOnStore(tree);
                op2  = tree->gtGetOp2();
            }

            lclVarTree = fgIsIndirOfAddrOfLocal(op1);
            if (lclVarTree != nullptr)
            {
                lclVarTree->gtFlags |= GTF_VAR_DEF;
            }

            effectiveOp1 = op1->gtEffectiveVal();

            // If we are storing a small type, we might be able to omit a cast.
            if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1))
            {
                if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) &&
                    varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow())
                {
                    var_types castType = op2->CastToType();

                    // If we are performing a narrowing cast and
                    // castType is larger or the same as op1's type
                    // then we can discard the cast.
                    if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1)))
                    {
                        tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp();
                    }
                }
            }

            fgAssignSetVarDef(tree);

            /* We can't CSE the LHS of an assignment */
            /* We must also set it in the pre-morphing phase, otherwise assertionProp doesn't see it */
            if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
            {
                op1->gtFlags |= GTF_DONT_CSE;
            }
            break;

        case GT_CAST:
            tree = fgOptimizeCast(tree->AsCast());
            if (!tree->OperIsSimple())
            {
                return tree;
            }
            if (tree->OperIs(GT_CAST) && tree->gtOverflow())
            {
                fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
            }

            typ  = tree->TypeGet();
            oper = tree->OperGet();
            op1  = tree->AsOp()->gtGetOp1();
            op2  = tree->gtGetOp2IfPresent();
            break;

        case GT_EQ:
        case GT_NE:
            // It is not safe to reorder/delete CSE's
            if (!optValnumCSE_phase && op2->IsIntegralConst())
            {
                tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp());
                assert(tree->OperIsCompare());

                oper = tree->OperGet();
                op1  = tree->gtGetOp1();
                op2  = tree->gtGetOp2();
            }
            goto COMPARE;

        case GT_LT:
        case GT_LE:
        case GT_GE:
        case GT_GT:

            if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)))
            {
                tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp());
                oper = tree->OperGet();
                op1  = tree->gtGetOp1();
                op2  = tree->gtGetOp2();
            }

            // op2's value may be changed, so it cannot be a CSE candidate.
            if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2))
            {
                tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp());
                oper = tree->OperGet();

                assert(op1 == tree->AsOp()->gtGetOp1());
                assert(op2 == tree->AsOp()->gtGetOp2());
            }

        COMPARE:

            noway_assert(tree->OperIsCompare());
            break;

        case GT_MUL:

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                // This must be GTF_MUL_64RSLT
                INDEBUG(tree->AsOp()->DebugCheckLongMul());
                return tree;
            }
#endif // TARGET_64BIT
            goto CM_OVF_OP;

        case GT_SUB:

            if (tree->gtOverflow())
            {
                goto CM_OVF_OP;
            }

            // TODO #4104: there are a lot of other places where
            // this condition is not checked before transformations.
            if (fgGlobalMorph)
            {
                /* Check for "op1 - cns2", we change it to "op1 + (-cns2)" */

                noway_assert(op2);
                if (op2->IsCnsIntOrI() && !op2->IsIconHandle())
                {
                    // Negate the constant and change the node to be "+",
                    // except when `op2` is a const byref.
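                    // e.g. "x - 5" becomes "x + (-5)", letting the commutative
                    // morphing at CM_ADD_OP below kick in.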
                    op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
                    op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField();
                    oper                          = GT_ADD;
                    tree->ChangeOper(oper);
                    goto CM_ADD_OP;
                }

                /* Check for "cns1 - op2", we change it to "(cns1 + (-op2))" */

                noway_assert(op1);
                if (op1->IsCnsIntOrI())
                {
                    noway_assert(varTypeIsIntOrI(tree));

                    // The type of the new GT_NEG node cannot just be op2->TypeGet().
                    // Otherwise we may sign-extend incorrectly in cases where the GT_NEG
                    // node ends up feeding directly into a cast, for example in
                    // GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte))
                    tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2);
                    fgMorphTreeDone(op2);

                    oper = GT_ADD;
                    tree->ChangeOper(oper);
                    goto CM_ADD_OP;
                }

                /* No match - exit */
            }

            // Skip optimization if non-NEG operand is constant.
            // Both op1 and op2 are not constant because it was already checked above.
            if (opts.OptimizationEnabled() && fgGlobalMorph)
            {
                // a - -b = > a + b
                // SUB(a, (NEG(b)) => ADD(a, b)
                if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
                {
                    // tree: SUB
                    // op1: a
                    // op2: NEG
                    // op2Child: b

                    GenTree* op2Child = op2->AsOp()->gtOp1; // b
                    oper              = GT_ADD;
                    tree->SetOper(oper, GenTree::PRESERVE_VN);
                    tree->AsOp()->gtOp2 = op2Child;

                    DEBUG_DESTROY_NODE(op2);

                    op2 = op2Child;
                }

                // -a - -b = > b - a
                // SUB(NEG(a), (NEG(b)) => SUB(b, a)
                else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2))
                {
                    // tree: SUB
                    // op1: NEG
                    // op1Child: a
                    // op2: NEG
                    // op2Child: b

                    GenTree* op1Child   = op1->AsOp()->gtOp1; // a
                    GenTree* op2Child   = op2->AsOp()->gtOp1; // b
                    tree->AsOp()->gtOp1 = op2Child;
                    tree->AsOp()->gtOp2 = op1Child;

                    DEBUG_DESTROY_NODE(op1);
                    DEBUG_DESTROY_NODE(op2);

                    op1 = op2Child;
                    op2 = op1Child;
                }
            }

            break;

#ifdef TARGET_ARM64
        case GT_DIV:
            if (!varTypeIsFloating(tree->gtType))
            {
                // Codegen for this instruction needs to be able to throw two exceptions:
                fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
                fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
            }
            break;
        case GT_UDIV:
            // Codegen for this instruction needs to be able to throw one exception:
            fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
            break;
#endif

        case GT_ADD:

        CM_OVF_OP:
            if (tree->gtOverflow())
            {
                tree->gtRequestSetFlags();

                // Add the exception-throwing basic block to jump to on overflow
                fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);

                // We can't do any commutative morphing for overflow instructions
                break;
            }

        CM_ADD_OP:

            FALLTHROUGH;

        case GT_OR:
        case GT_XOR:
        case GT_AND:
            tree = fgOptimizeCommutativeArithmetic(tree->AsOp());
            if (!tree->OperIsSimple())
            {
                return tree;
            }
            typ  = tree->TypeGet();
            oper = tree->OperGet();
            op1  = tree->gtGetOp1();
            op2  = tree->gtGetOp2IfPresent();
            break;

        case GT_NOT:
        case GT_NEG:
            // Remove double negation/not.
            // Note: this is not a safe transformation if "tree" is a CSE candidate.
            // Consider for example the following expression: NEG(NEG(OP)), where any
            // NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find
            // the original NEG in the statement.
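            // e.g. NEG(NEG(x)) and NOT(NOT(x)) both collapse to just "x" below.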
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) && !gtIsActiveCSE_Candidate(op1)) { JITDUMP("Remove double negation/not\n") GenTree* op1op1 = op1->gtGetOp1(); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op1op1; } // Distribute negation over simple multiplication/division expressions if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) && op1->OperIs(GT_MUL, GT_DIV)) { GenTreeOp* mulOrDiv = op1->AsOp(); GenTree* op1op1 = mulOrDiv->gtGetOp1(); GenTree* op1op2 = mulOrDiv->gtGetOp2(); if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle()) { // NEG(MUL(a, C)) => MUL(a, -C) // NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1} ssize_t constVal = op1op2->AsIntCon()->IconValue(); if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) || (mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow())) { GenTree* newOp1 = op1op1; // a GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C mulOrDiv->gtOp1 = newOp1; mulOrDiv->gtOp2 = newOp2; mulOrDiv->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1op2); return mulOrDiv; } } } /* Any constant cases should have been folded earlier */ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase); break; case GT_CKFINITE: noway_assert(varTypeIsFloating(op1->TypeGet())); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN); break; case GT_BOUNDS_CHECK: fgSetRngChkTarget(tree); break; case GT_OBJ: case GT_BLK: case GT_IND: { // If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on // the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X // is a local or CLS_VAR, even if it has been address-exposed. if (op1->OperIs(GT_ADDR)) { tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF); } if (!tree->OperIs(GT_IND)) { break; } // Can not remove a GT_IND if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } bool foldAndReturnTemp = false; temp = nullptr; ival1 = 0; // Don't remove a volatile GT_IND, even if the address points to a local variable. if ((tree->gtFlags & GTF_IND_VOLATILE) == 0) { /* Try to Fold *(&X) into X */ if (op1->gtOper == GT_ADDR) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } temp = op1->AsOp()->gtOp1; // X // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that // they are the *same* struct type. In fact, they almost certainly aren't. If the // address has an associated field sequence, that identifies this case; go through // the "lcl_fld" path rather than this one. FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below. 
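                // Fold *(&X) into X only when the types match exactly and no
                // zero-offset field sequence reinterprets the location.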
                if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
                {
                    foldAndReturnTemp = true;
                }
                else if (temp->OperIsLocal())
                {
                    unsigned   lclNum = temp->AsLclVarCommon()->GetLclNum();
                    LclVarDsc* varDsc = lvaGetDesc(lclNum);

                    // We will try to optimize when we have a promoted struct promoted with a zero lvFldOffset
                    if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
                    {
                        noway_assert(varTypeIsStruct(varDsc));

                        // We will try to optimize when we have a single field struct that is being struct promoted
                        if (varDsc->lvFieldCnt == 1)
                        {
                            unsigned lclNumFld = varDsc->lvFieldLclStart;
                            // just grab the promoted field
                            LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);

                            // Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
                            // is zero
                            if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
                            {
                                // We can just use the existing promoted field LclNum
                                temp->AsLclVarCommon()->SetLclNum(lclNumFld);
                                temp->gtType = fieldVarDsc->TypeGet();

                                foldAndReturnTemp = true;
                            }
                        }
                    }
                    // If the type of the IND (typ) is a "small int", and the type of the local has the
                    // same width, then we can reduce to just the local variable -- it will be
                    // correctly normalized.
                    //
                    // The below transformation cannot be applied if the local var needs to be normalized on load.
                    else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
                             !lvaTable[lclNum].lvNormalizeOnLoad())
                    {
                        const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
                        const bool possiblyStore  = !definitelyLoad;

                        if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
                        {
                            typ               = temp->TypeGet();
                            tree->gtType      = typ;
                            foldAndReturnTemp = true;

                            if (possiblyStore)
                            {
                                // This node can be on the left-hand-side of an assignment node.
                                // Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
                                // is called on its parent in post-order morph.
                                temp->gtFlags |= GTF_VAR_FOLDED_IND;
                            }
                        }
                    }
                    // For matching types we can fold
                    else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
                             !lvaTable[lclNum].lvNormalizeOnLoad())
                    {
                        tree->gtType = typ = temp->TypeGet();
                        foldAndReturnTemp  = true;
                    }
                    else
                    {
                        // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
                        // nullptr)
                        assert(fieldSeq == nullptr);
                        bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
                        assert(b || fieldSeq == nullptr);

                        if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
                        {
                            // Append the field sequence, change the type.
                            temp->AsLclFld()->SetFieldSeq(
                                GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
                            temp->gtType = typ;

                            foldAndReturnTemp = true;
                        }
                    }
                    // Otherwise we will fold this into a GT_LCL_FLD below
                    // where we check (temp != nullptr)
                }
                else // !temp->OperIsLocal()
                {
                    // We don't try to fold away the GT_IND/GT_ADDR for this case
                    temp = nullptr;
                }
            }
            else if (op1->OperGet() == GT_ADD)
            {
#ifdef TARGET_ARM
                // Check for a misaligned floating point indirection.
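                // e.g. a float load whose constant offset is not a multiple of the
                // float size gets marked with GTF_IND_UNALIGNED.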
if (varTypeIsFloating(typ)) { GenTree* addOp2 = op1->AsOp()->gtGetOp2(); if (addOp2->IsCnsIntOrI()) { ssize_t offset = addOp2->AsIntCon()->gtIconVal; if ((offset % emitTypeSize(TYP_FLOAT)) != 0) { tree->gtFlags |= GTF_IND_UNALIGNED; } } } #endif // TARGET_ARM /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */ if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT && opts.OptimizationEnabled()) { // No overflow arithmetic with pointers noway_assert(!op1->gtOverflow()); temp = op1->AsOp()->gtOp1->AsOp()->gtOp1; if (!temp->OperIsLocal()) { temp = nullptr; break; } // Can not remove the GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1)) { break; } ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal; fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq; // Does the address have an associated zero-offset field sequence? FieldSeqNode* addrFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq)) { fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq); } if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT) { noway_assert(!varTypeIsGC(temp->TypeGet())); foldAndReturnTemp = true; } else { // The emitter can't handle large offsets if (ival1 != (unsigned short)ival1) { break; } // The emitter can get confused by invalid offsets if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum())) { break; } } // Now we can fold this into a GT_LCL_FLD below // where we check (temp != nullptr) } } } // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging: // - We may have a load of a local where the load has a different type than the local // - We may have a load of a local plus an offset // // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and // offset if doing so is legal. The only cases in which this transformation is illegal are if the load // begins before the local or if the load extends beyond the end of the local (i.e. if the load is // out-of-bounds w.r.t. the local). if ((temp != nullptr) && !foldAndReturnTemp) { assert(temp->OperIsLocal()); const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lclNum); const var_types tempTyp = temp->TypeGet(); const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK); const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp); // Make sure we do not enregister this lclVar. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); // If the size of the load is greater than the size of the lclVar, we cannot fold this access into // a lclFld: the access represented by an lclFld node must begin at or after the start of the // lclVar and must not extend beyond the end of the lclVar. if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize)) { GenTreeLclFld* lclFld; // We will turn a GT_LCL_VAR into a GT_LCL_FLD with an gtLclOffs of 'ival' // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival' // Then we change the type of the GT_LCL_FLD to match the orginal GT_IND type. // if (temp->OperGet() == GT_LCL_FLD) { lclFld = temp->AsLclFld(); lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1)); lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq)); } else // We have a GT_LCL_VAR. 
{ assert(temp->OperGet() == GT_LCL_VAR); temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField". lclFld = temp->AsLclFld(); lclFld->SetLclOffs(static_cast<unsigned>(ival1)); if (fieldSeq != nullptr) { // If it does represent a field, note that. lclFld->SetFieldSeq(fieldSeq); } } temp->gtType = tree->gtType; foldAndReturnTemp = true; } } if (foldAndReturnTemp) { assert(temp != nullptr); assert(temp->TypeGet() == typ); assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR)); // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for // 'temp' because a GT_ADDR always marks it for its operand. temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE); if (op1->OperGet() == GT_ADD) { DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT } DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR DEBUG_DESTROY_NODE(tree); // GT_IND // If the result of the fold is a local var, we may need to perform further adjustments e.g. for // normalization. if (temp->OperIs(GT_LCL_VAR)) { #ifdef DEBUG // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear // and the node in question must have this bit set (as it has already been morphed). temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG const bool forceRemorph = true; temp = fgMorphLocalVar(temp, forceRemorph); #ifdef DEBUG // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function // returns. temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return temp; } // Only do this optimization when we are in the global optimizer. Doing this after value numbering // could result in an invalid value number for the newly generated GT_IND node. if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph) { // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)). // TBD: this transformation is currently necessary for correctness -- it might // be good to analyze the failures that result if we don't do this, and fix them // in other ways. Ideally, this should be optional. GenTree* commaNode = op1; GenTreeFlags treeFlags = tree->gtFlags; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS at // least. #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA) { commaNode = commaNode->AsOp()->gtOp2; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at // least. commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) & (GTF_ASG | GTF_CALL)); #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0; ArrayInfo arrInfo; if (wasArrIndex) { bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo); assert(b); GetArrayInfoMap()->Remove(tree); } tree = op1; GenTree* addr = commaNode->AsOp()->gtOp2; // TODO-1stClassStructs: we often create a struct IND without a handle, fix it. 
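            // Illustrative shape: IND(COMMA(s1, COMMA(s2, addr))) has been rewritten so far into
            // COMMA(s1, COMMA(s2, <hole>)); the IND(addr) created below fills the innermost hole.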
op1 = gtNewIndir(typ, addr); // This is very conservative op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING; op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); if (wasArrIndex) { GetArrayInfoMap()->Set(op1, arrInfo); } #ifdef DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif commaNode->AsOp()->gtOp2 = op1; commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); return tree; } break; } case GT_ADDR: // Can not remove op1 if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } if (op1->OperGet() == GT_IND) { if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(IND(...)) == (...). GenTree* addr = op1->AsOp()->gtOp1; // If tree has a zero field sequence annotation, update the annotation // on addr node. FieldSeqNode* zeroFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq)) { fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq); } noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } } else if (op1->OperGet() == GT_OBJ) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(OBJ(...)) == (...). GenTree* addr = op1->AsObj()->Addr(); noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase) { // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)). // (Be sure to mark "z" as an l-value...) ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack)); for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2()) { commas.Push(comma); } GenTree* commaNode = commas.Top(); // The top-level addr might be annotated with a zeroOffset field. FieldSeqNode* zeroFieldSeq = nullptr; bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq); tree = op1; commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE; // If the node we're about to put under a GT_ADDR is an indirection, it // doesn't need to be materialized, since we only want the addressing mode. Because // of this, this GT_IND is not a faulting indirection and we don't have to extract it // as a side effect. GenTree* commaOp2 = commaNode->AsOp()->gtOp2; if (commaOp2->OperIsBlk()) { commaOp2->SetOper(GT_IND); } if (commaOp2->gtOper == GT_IND) { commaOp2->gtFlags |= GTF_IND_NONFAULTING; commaOp2->gtFlags &= ~GTF_EXCEPT; commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT); } op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2); if (isZeroOffset) { // Transfer the annotation to the new GT_ADDR node. fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq); } commaNode->AsOp()->gtOp2 = op1; // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform // might give op1 a type different from byref (like, say, native int). So now go back and give // all the comma nodes the type of op1. // TODO: the comma flag update below is conservative and can be improved. // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to // get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF). 
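            // For example (illustrative), ADDR(COMMA(asg, z)) has become COMMA(asg, ADDR(z)) above,
            // so each comma popped below is retyped to the type of the new ADDR node (TYP_BYREF here).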
while (!commas.Empty()) { GenTree* comma = commas.Pop(); comma->gtType = op1->gtType; comma->gtFlags |= op1->gtFlags; #ifdef DEBUG comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif gtUpdateNodeSideEffects(comma); } return tree; } break; case GT_COLON: if (fgGlobalMorph) { /* Mark the nodes that are conditionally executed */ fgWalkTreePre(&tree, gtMarkColonCond); } /* Since we're doing this postorder we clear this if it got set by a child */ fgRemoveRestOfBlock = false; break; case GT_COMMA: /* Special case: trees that don't produce a value */ if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2)) { typ = tree->gtType = TYP_VOID; } // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. // if (!optValnumCSE_phase) { // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this // is all we need. GenTree* op1SideEffects = nullptr; // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example) // hoisted expressions in loops. gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE)); if (op1SideEffects) { // Replace the left hand side with the side effect list. op1 = op1SideEffects; tree->AsOp()->gtOp1 = op1SideEffects; gtUpdateNodeSideEffects(tree); } else { op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op2; } // If the right operand is just a void nop node, throw it away. Unless this is a // comma throw, in which case we want the top-level morphing loop to recognize it. if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree)) { op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op2); return op1; } } break; case GT_JTRUE: /* Special case if fgRemoveRestOfBlock is set to true */ if (fgRemoveRestOfBlock) { if (fgIsCommaThrow(op1, true)) { GenTree* throwNode = op1->AsOp()->gtOp1; JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n", dspTreeID(tree)); DEBUG_DESTROY_NODE(tree); return throwNode; } noway_assert(op1->OperIsCompare()); noway_assert(op1->gtFlags & GTF_EXCEPT); // We need to keep op1 for the side-effects. Hang it off // a GT_COMMA node JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree)); tree->ChangeOper(GT_COMMA); tree->AsOp()->gtOp2 = op2 = gtNewNothingNode(); // Additionally since we're eliminating the JTRUE // codegen won't like it if op1 is a RELOP of longs, floats or doubles. // So we change it into a GT_COMMA as well. 
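                // The net effect (illustrative): JTRUE(LT(a, b)) in the now-unreachable tail becomes
                // COMMA(COMMA(a, b), NOP), preserving the operands' side effects without the branch.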
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1)); op1->ChangeOper(GT_COMMA); op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop op1->gtType = op1->AsOp()->gtOp1->gtType; return tree; } break; case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant) { // Should be expanded by the time it reaches CSE phase assert(!optValnumCSE_phase); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to "); if (op1->OperIsConst()) { // We're lucky to catch a constant here while importer was not JITDUMP("true\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(1); } else { GenTree* op1SideEffects = nullptr; gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT); if (op1SideEffects != nullptr) { DEBUG_DESTROY_NODE(tree); // Keep side-effects of op1 tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0)); JITDUMP("false with side effects:\n") DISPTREE(tree); } else { JITDUMP("false\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(0); } } INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } break; default: break; } assert(oper == tree->gtOper); // Propagate comma throws. // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON)) { if ((op1 != nullptr) && fgIsCommaThrow(op1, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY); if (propagatedThrow != nullptr) { return propagatedThrow; } } if ((op2 != nullptr) && fgIsCommaThrow(op2, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT); if (propagatedThrow != nullptr) { return propagatedThrow; } } } /*------------------------------------------------------------------------- * Optional morphing is done if tree transformations is permitted */ if ((opts.compFlags & CLFLG_TREETRANS) == 0) { return tree; } tree = fgMorphSmpOpOptional(tree->AsOp()); return tree; } //------------------------------------------------------------------------ // fgOptimizeCast: Optimizes the supplied GT_CAST tree. // // Tries to get rid of the cast, its operand, the GTF_OVERFLOW flag, calls // calls "optNarrowTree". Called in post-order by "fgMorphSmpOp". // // Arguments: // tree - the cast tree to optimize // // Return Value: // The optimized tree (that can have any shape). // GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast) { GenTree* src = cast->CastOp(); if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src)) { return cast; } // See if we can discard the cast. if (varTypeIsIntegral(cast) && varTypeIsIntegral(src)) { IntegralRange srcRange = IntegralRange::ForNode(src, this); IntegralRange noOvfRange = IntegralRange::ForCastInput(cast); if (noOvfRange.Contains(srcRange)) { // Casting between same-sized types is a no-op, // given we have proven this cast cannot overflow. if (genActualType(cast) == genActualType(src)) { return src; } cast->ClearOverflow(); cast->SetAllEffectsFlags(src); // Try and see if we can make this cast into a cheaper zero-extending version. if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive()) { cast->SetUnsigned(); } } // For checked casts, we're done. if (cast->gtOverflow()) { return cast; } var_types castToType = cast->CastToType(); // For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast. 
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) && src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD)) { // We're changing the type here so we need to update the VN; // in other cases we discard the cast without modifying src // so the VN doesn't change. src->ChangeType(castToType); src->SetVNsFromNode(cast); return src; } // Try to narrow the operand of the cast and discard the cast. if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) && optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false)) { optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true); // "optNarrowTree" may leave a dead cast behind. if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp()))) { src = src->AsCast()->CastOp(); } return src; } // Check for two consecutive casts, we may be able to discard the intermediate one. if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow()) { var_types dstCastToType = castToType; var_types srcCastToType = src->AsCast()->CastToType(); // CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X). // CAST(ushort <- CAST(short <- X)): CAST(ushort <- X). if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType))) { cast->CastOp() = src->AsCast()->CastOp(); DEBUG_DESTROY_NODE(src); } } } return cast; } //------------------------------------------------------------------------ // fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns. // // Arguments: // cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant // // Return Value: // The optimized tree, "cmp" in case no optimizations were done. // Currently only returns relop trees. // GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_EQ, GT_NE)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); // Check for "(expr +/- icon1) ==/!= (non-zero-icon2)". if (op2->IsCnsIntOrI() && (op2->IconValue() != 0)) { // Since this can occur repeatedly we use a while loop. while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Got it; change "x + icon1 == icon2" to "x == icon2 - icon1". ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue(); ssize_t op2Value = op2->IconValue(); if (op1->OperIs(GT_ADD)) { op2Value -= op1Value; } else { op2Value += op1Value; } op1 = op1->AsOp()->gtGetOp1(); op2->SetIconValue(static_cast<int32_t>(op2Value)); } cmp->gtOp1 = op1; fgUpdateConstTreeValueNumber(op2); } // Here we look for the following tree // // EQ/NE // / \. // op1 CNS 0/1 // if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1)) { ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue()); if (op1->OperIsCompare()) { // Here we look for the following tree // // EQ/NE -> RELOP/!RELOP // / \ / \. // RELOP CNS 0/1 // / \. // // Note that we will remove/destroy the EQ/NE node and move // the RELOP up into it's location. // Here we reverse the RELOP if necessary. 
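            // For example (illustrative): NE(GT(x, y), 0) becomes GT(x, y), while EQ(GT(x, y), 0)
            // becomes LE(x, y); the relop is reversed exactly when the equality test would negate it.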
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ))); if (reverse) { gtReverseCond(op1); } noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0); op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE); op1->SetVNsFromNode(cmp); DEBUG_DESTROY_NODE(cmp); return op1; } // // Now we check for a compare with the result of an '&' operator // // Here we look for the following transformation: // // EQ/NE EQ/NE // / \ / \. // AND CNS 0/1 -> AND CNS 0 // / \ / \. // RSZ/RSH CNS 1 x CNS (1 << y) // / \. // x CNS_INT +y if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH)) { GenTreeOp* andOp = op1->AsOp(); GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp(); if (!rshiftOp->gtGetOp2()->IsCnsIntOrI()) { goto SKIP; } ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue(); if (shiftAmount < 0) { goto SKIP; } if (!andOp->gtGetOp2()->IsIntegralConst(1)) { goto SKIP; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if (andOp->TypeIs(TYP_INT)) { if (shiftAmount > 31) { goto SKIP; } andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount)); // Reverse the condition if necessary. if (op2Value == 1) { gtReverseCond(cmp); op2->SetIconValue(0); } } else if (andOp->TypeIs(TYP_LONG)) { if (shiftAmount > 63) { goto SKIP; } andMask->SetLngValue(1ll << shiftAmount); // Reverse the cond if necessary if (op2Value == 1) { gtReverseCond(cmp); op2->SetLngValue(0); } } andOp->gtOp1 = rshiftOp->gtGetOp1(); DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2()); DEBUG_DESTROY_NODE(rshiftOp); } } SKIP: // Now check for compares with small constant longs that can be cast to int. // Note that we filter out negative values here so that the transformations // below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were // we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs. if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0)) { return cmp; } if (!op1->OperIs(GT_AND)) { // Another interesting case: cast from int. if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Simply make this into an integer comparison. cmp->gtOp1 = op1->AsCast()->CastOp(); op2->BashToConst(static_cast<int32_t>(op2->LngValue())); fgUpdateConstTreeValueNumber(op2); } return cmp; } // Now we perform the following optimization: // EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) => // EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT) // when the constants are sufficiently small. // This transform cannot preserve VNs. if (fgGlobalMorph) { assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND)); // Is the result of the mask effectively an INT? GenTreeOp* andOp = op1->AsOp(); if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG)) { return cmp; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if ((andMask->LngValue() >> 32) != 0) { return cmp; } // Now we narrow the first operand of AND to int. if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false)) { optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true); } else { andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT); } assert(andMask == andOp->gtGetOp2()); // Now replace the mask node. andMask->BashToConst(static_cast<int32_t>(andMask->LngValue())); // Now change the type of the AND node. andOp->ChangeType(TYP_INT); // Finally we replace the comparand. 
        op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
    }

    return cmp;
}

//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation.
//
// Recognizes comparisons against various constant operands and morphs
// them, if possible, into comparisons against zero.
//
// Arguments:
//   cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
//   The "cmp" tree, possibly with a modified oper.
//   The second operand's constant value may be modified as well.
//
// Assumptions:
//   The operands have been swapped so that any constants are on the right.
//   The second operand is an integral constant.
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp)
{
    assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
    assert(cmp->gtGetOp2()->IsIntegralConst());
    assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2()));

    GenTree*             op1 = cmp->gtGetOp1();
    GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();

    assert(genActualType(op1) == genActualType(op2));

    genTreeOps oper     = cmp->OperGet();
    int64_t    op2Value = op2->IntegralValue();

    if (op2Value == 1)
    {
        // Check for "expr >= 1".
        if (oper == GT_GE)
        {
            // Change to "expr != 0" for unsigned and "expr > 0" for signed.
            oper = cmp->IsUnsigned() ? GT_NE : GT_GT;
        }
        // Check for "expr < 1".
        else if (oper == GT_LT)
        {
            // Change to "expr == 0" for unsigned and "expr <= 0" for signed.
            oper = cmp->IsUnsigned() ? GT_EQ : GT_LE;
        }
    }
    // Check for "expr relop -1".
    else if (!cmp->IsUnsigned() && (op2Value == -1))
    {
        // Check for "expr <= -1".
        if (oper == GT_LE)
        {
            // Change to "expr < 0".
            oper = GT_LT;
        }
        // Check for "expr > -1".
        else if (oper == GT_GT)
        {
            // Change to "expr >= 0".
            oper = GT_GE;
        }
    }
    else if (cmp->IsUnsigned())
    {
        if ((oper == GT_LE) || (oper == GT_GT))
        {
            if (op2Value == 0)
            {
                // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
                // recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails
                // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
                // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare, it sometimes
                // occurs as a result of branch inversion.
                oper = (oper == GT_LE) ? GT_EQ : GT_NE;
                cmp->gtFlags &= ~GTF_UNSIGNED;
            }
            // LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0).
            else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) ||
                     ((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX)))
            {
                oper = (oper == GT_LE) ? GT_GE : GT_LT;
                cmp->gtFlags &= ~GTF_UNSIGNED;
            }
        }
    }

    if (!cmp->OperIs(oper))
    {
        // Keep the old ValueNumber for 'tree' as the new expr
        // will still compute the same value as before.
        cmp->SetOper(oper, GenTree::PRESERVE_VN);
        op2->SetIntegralValue(0);
        fgUpdateConstTreeValueNumber(op2);
    }

    return cmp;
}

#ifdef FEATURE_HW_INTRINSICS

//------------------------------------------------------------------------
// fgOptimizeHWIntrinsic: optimize a HW intrinsic node
//
// Arguments:
//    node - HWIntrinsic node to examine
//
// Returns:
//    The original node if no optimization happened or if tree bashing occurred.
//    An alternative tree if an optimization happened.
//
// Notes:
//    Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create,
//    and if the call is one of these, attempt to optimize.
//    This is post-order, meaning that it will not morph the children.
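//    For example, Vector128.Create(0, 0, 0, 0) (all arguments constant zero) is rewritten below
//    to Vector128_get_Zero, which typically codegens as a single vector-zeroing instruction.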
// GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) { assert(!optValnumCSE_phase); if (opts.OptimizationDisabled()) { return node; } switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARM64) case NI_Vector64_Create: #endif { bool hwAllArgsAreConstZero = true; for (GenTree* arg : node->Operands()) { if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero()) { hwAllArgsAreConstZero = false; break; } } if (hwAllArgsAreConstZero) { switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: { node->ResetHWIntrinsicId(NI_Vector128_get_Zero); break; } #if defined(TARGET_XARCH) case NI_Vector256_Create: { node->ResetHWIntrinsicId(NI_Vector256_get_Zero); break; } #elif defined(TARGET_ARM64) case NI_Vector64_Create: { node->ResetHWIntrinsicId(NI_Vector64_get_Zero); break; } #endif default: unreached(); } } break; } default: break; } return node; } #endif //------------------------------------------------------------------------ // fgOptimizeCommutativeArithmetic: Optimizes commutative operations. // // Arguments: // tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize. // // Return Value: // The optimized tree that can have any shape. // GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree) { assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND)); assert(!tree->gtOverflowEx()); // Commute constants to the right. if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF)) { // TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))". // This may indicate a missed "remorph". Task is to re-enable this assertion and investigate. std::swap(tree->gtOp1, tree->gtOp2); } if (fgOperIsBitwiseRotationRoot(tree->OperGet())) { GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree); if (rotationTree != nullptr) { return rotationTree; } } if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR)) { GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp()); if (castTree != nullptr) { return castTree; } } if (varTypeIsIntegralOrI(tree)) { genTreeOps oldTreeOper = tree->OperGet(); GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp()); if (optimizedTree != nullptr) { if (!optimizedTree->OperIs(oldTreeOper)) { // "optimizedTree" could end up being a COMMA. return optimizedTree; } tree = optimizedTree; } } if (!optValnumCSE_phase) { GenTree* optimizedTree = nullptr; if (tree->OperIs(GT_ADD)) { optimizedTree = fgOptimizeAddition(tree); } else if (tree->OperIs(GT_MUL)) { optimizedTree = fgOptimizeMultiply(tree); } else if (tree->OperIs(GT_AND)) { optimizedTree = fgOptimizeBitwiseAnd(tree); } if (optimizedTree != nullptr) { return optimizedTree; } } return tree; } //------------------------------------------------------------------------ // fgOptimizeAddition: optimizes addition. // // Arguments: // add - the unchecked GT_ADD tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add) { assert(add->OperIs(GT_ADD) && !add->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = add->gtGetOp1(); GenTree* op2 = add->gtGetOp2(); // Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))". // Be careful not to create a byref pointer that may point outside of the ref object. 
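    // For example (illustrative), ADD(ADD(x, 2), ADD(y, 3)) becomes ADD(ADD(x, y), 5).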
    // Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2".
    if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() &&
        op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() &&
        !varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph)
    {
        GenTreeOp*     addOne   = op1->AsOp();
        GenTreeOp*     addTwo   = op2->AsOp();
        GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon();
        GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon();

        addOne->gtOp2 = addTwo->gtGetOp1();
        addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2());
        DEBUG_DESTROY_NODE(addTwo);

        constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue());
        op2        = constOne;
        add->gtOp2 = constOne;
        DEBUG_DESTROY_NODE(constTwo);
    }

    // Fold (x + 0) - given it won't change the tree type to TYP_REF.
    // TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)".
    if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF)))
    {
        if (op2->IsCnsIntOrI() && varTypeIsI(op1))
        {
            fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq);
        }

        DEBUG_DESTROY_NODE(op2);
        DEBUG_DESTROY_NODE(add);

        return op1;
    }

    // Note that these transformations are legal for floating-point ADDs as well.
    if (opts.OptimizationEnabled())
    {
        // -a + b => b - a
        // ADD(NEG(a), b) => SUB(b, a)
        // Do not do this if "op2" is constant for canonicalization purposes.
        if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2))
        {
            add->SetOper(GT_SUB);
            add->gtOp1 = op2;
            add->gtOp2 = op1->AsOp()->gtGetOp1();

            DEBUG_DESTROY_NODE(op1);

            return add;
        }

        // a + -b => a - b
        // ADD(a, NEG(b)) => SUB(a, b)
        if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
        {
            add->SetOper(GT_SUB);
            add->gtOp2 = op2->AsOp()->gtGetOp1();

            DEBUG_DESTROY_NODE(op2);

            return add;
        }
    }

    return nullptr;
}

//------------------------------------------------------------------------
// fgOptimizeMultiply: optimizes multiplication.
//
// Arguments:
//   mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize.
//
// Return Value:
//   The optimized tree, that can have any shape, in case any transformations
//   were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
{
    assert(mul->OperIs(GT_MUL));
    assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul));
    assert(!mul->gtOverflow());
    assert(!optValnumCSE_phase);

    GenTree* op1 = mul->gtGetOp1();
    GenTree* op2 = mul->gtGetOp2();

    assert(mul->TypeGet() == genActualType(op1));
    assert(mul->TypeGet() == genActualType(op2));

    if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl())
    {
        double multiplierValue = op2->AsDblCon()->gtDconVal;

        if (multiplierValue == 1.0)
        {
            // Fold "x * 1.0" to "x".
            DEBUG_DESTROY_NODE(op2);
            DEBUG_DESTROY_NODE(mul);

            return op1;
        }

        // Fold "x * 2.0" to "x + x".
        // If op1 is not a local we will have to introduce a temporary via GT_COMMA.
        // Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do
        // this for locals / after hoisting has run (when rationalization remorphs
        // math intrinsics into calls...).
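        // For example (illustrative), "d * 2.0" with d already in local V03 becomes
        // ADD(LCL_VAR double V03, LCL_VAR double V03), saving the constant load and the multiply.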
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear))) { op2 = fgMakeMultiUse(&op1); GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2); INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return add; } } if (op2->IsIntegralConst()) { ssize_t mult = op2->AsIntConCommon()->IconValue(); bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq(); assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr); if (mult == 0) { // We may be able to throw away op1 (unless it has side-effects) if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0) { DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(mul); return op2; // Just return the "0" node } // We need to keep op1 for the side-effects. Hang it off a GT_COMMA node. mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN); return mul; } #ifdef TARGET_XARCH // Should we try to replace integer multiplication with lea/add/shift sequences? bool mulShiftOpt = compCodeOpt() != SMALL_CODE; #else // !TARGET_XARCH bool mulShiftOpt = false; #endif // !TARGET_XARCH size_t abs_mult = (mult >= 0) ? mult : -mult; size_t lowestBit = genFindLowestBit(abs_mult); bool changeToShift = false; // is it a power of two? (positive or negative) if (abs_mult == lowestBit) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } // If "op2" is a constant array index, the other multiplicand must be a constant. // Transfer the annotation to the other one. if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(op2->AsIntCon()->gtFieldSeq->m_next == nullptr); GenTree* otherOp = op1; if (otherOp->OperGet() == GT_NEG) { otherOp = otherOp->AsOp()->gtOp1; } assert(otherOp->OperGet() == GT_CNS_INT); assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField()); otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq; } if (abs_mult == 1) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Change the multiplication into a shift by log2(val) bits. op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult)); changeToShift = true; } else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) { int shift = genLog2(lowestBit); ssize_t factor = abs_mult >> shift; if (factor == 3 || factor == 5 || factor == 9) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet()); if (op2IsConstIndex) { factorIcon->AsIntCon()->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); } // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon); mul->gtOp1 = op1; fgMorphTreeDone(op1); op2->AsIntConCommon()->SetIconValue(shift); changeToShift = true; } } if (changeToShift) { fgUpdateConstTreeValueNumber(op2); mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN); return mul; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeBitwiseAnd: optimizes the "and" operation. // // Arguments: // andOp - the GT_AND tree to optimize. 
// // Return Value: // The optimized tree, currently always a relop, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp) { assert(andOp->OperIs(GT_AND)); assert(!optValnumCSE_phase); GenTree* op1 = andOp->gtGetOp1(); GenTree* op2 = andOp->gtGetOp2(); // Fold "cmp & 1" to just "cmp". if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(andOp); return op1; } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against // various cast operands and tries to remove them. E.g.: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CNS_INT long // // to: // // * GE_un int // +--* X int // \--* CNS_INT int // // same for: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CAST long <- [u]long <- int // \--* ARR_LEN int // // These patterns quite often show up along with index checks // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // Returns the same tree where operands might have narrower types // // Notes: // TODO-Casts: consider unifying this function with "optNarrowTree" // GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // Caller is expected to call this function only if we have CAST nodes assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)); if (!op1->TypeIs(TYP_LONG)) { // We can extend this logic to handle small types as well, but currently it's done mostly to // assist range check elimination return cmp; } GenTree* castOp; GenTree* knownPositiveOp; bool knownPositiveIsOp2; if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)))) { // op2 is either a LONG constant or (T)ARR_LENGTH knownPositiveIsOp2 = true; castOp = cmp->gtGetOp1(); knownPositiveOp = cmp->gtGetOp2(); } else { // op1 is either a LONG constant (yes, it's pretty normal for relops) // or (T)ARR_LENGTH castOp = cmp->gtGetOp2(); knownPositiveOp = cmp->gtGetOp1(); knownPositiveIsOp2 = false; } if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) && castOp->IsUnsigned() && !castOp->gtOverflow()) { bool knownPositiveFitsIntoU32 = false; if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue())) { // BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX. knownPositiveFitsIntoU32 = true; } else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) && knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)) { knownPositiveFitsIntoU32 = true; // TODO-Casts: recognize Span.Length here as well. 
} if (!knownPositiveFitsIntoU32) { return cmp; } JITDUMP("Removing redundant cast(s) for:\n") DISPTREE(cmp) JITDUMP("\n\nto:\n\n") cmp->SetUnsigned(); // Drop cast from castOp if (knownPositiveIsOp2) { cmp->gtOp1 = castOp->AsCast()->CastOp(); } else { cmp->gtOp2 = castOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(castOp); if (knownPositiveOp->OperIs(GT_CAST)) { // Drop cast from knownPositiveOp too if (knownPositiveIsOp2) { cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp(); } else { cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(knownPositiveOp); } else { // Change type for constant from LONG to INT knownPositiveOp->ChangeType(TYP_INT); #ifndef TARGET_64BIT assert(knownPositiveOp->OperIs(GT_CNS_LNG)); knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue())); #endif fgUpdateConstTreeValueNumber(knownPositiveOp); } DISPTREE(cmp) JITDUMP("\n") } return cmp; } //------------------------------------------------------------------------ // fgPropagateCommaThrow: propagate a "comma throw" up the tree. // // "Comma throws" in the compiler represent the canonical form of an always // throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy // the semantic that the original expression produced some value and are // generated by "gtFoldExprConst" when it encounters checked arithmetic that // will determinably overflow. // // In the global morphing phase, "comma throws" are "propagated" up the tree, // in post-order, to eliminate nodes that will never execute. This method, // called by "fgMorphSmpOp", encapsulates this optimization. // // Arguments: // parent - the node currently being processed. // commaThrow - the comma throw in question, "parent"'s operand. // precedingSideEffects - side effects of nodes preceding "comma" in execution order. // // Return Value: // If "parent" is to be replaced with a comma throw, i. e. the propagation was successful, // the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception: // the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not // have to be a "comma throw", it can be "bare" throw call if the "parent" node did not // produce any value. // // Notes: // "Comma throws" are very rare. // GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects) { // Comma throw propagation does not preserve VNs, and deletes nodes. assert(fgGlobalMorph); assert(fgIsCommaThrow(commaThrow)); if ((commaThrow->gtFlags & GTF_COLON_COND) == 0) { fgRemoveRestOfBlock = true; } if ((precedingSideEffects & GTF_ALL_EFFECT) == 0) { if (parent->TypeIs(TYP_VOID)) { // Return the throw node as the new tree. return commaThrow->gtGetOp1(); } // Fix up the COMMA's type if needed. if (genActualType(parent) != genActualType(commaThrow)) { commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent)); commaThrow->ChangeType(genActualType(parent)); } return commaThrow; } return nullptr; } //---------------------------------------------------------------------------------------------- // fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree. // // Arguments: // node - The return node that uses an indirection. // // Return Value: // the original op1 of the ret if there was no optimization or an optimized new op1. 
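//
//    For example (illustrative), RETURN long(IND long(ADDR(LCL_VAR struct<8> V00))) can drop the
//    IND/ADDR pair and return V00 directly; lowering reconciles the type mismatch (e.g. via BITCAST).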
// GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ)); GenTreeIndir* ind = ret->gtGetOp1()->AsIndir(); GenTree* addr = ind->Addr(); if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR)) { // If struct promotion was undone, adjust the annotations if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr)) { return ind; } // If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that // LclVar. // Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))). GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar(); if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum())) { assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind)); unsigned indSize; if (ind->OperIs(GT_IND)) { indSize = genTypeSize(ind); } else { indSize = ind->AsBlk()->GetLayout()->GetSize(); } LclVarDsc* varDsc = lvaGetDesc(lclVar); unsigned lclVarSize; if (!lclVar->TypeIs(TYP_STRUCT)) { lclVarSize = genTypeSize(varDsc->TypeGet()); } else { lclVarSize = varDsc->lvExactSize; } // TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST // int<-SIMD16` etc. assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister); #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif // TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for // gtNewTempAssign`. if (canFold && (genReturnBB == nullptr)) { // Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it. // Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken // and enregister it. DEBUG_DESTROY_NODE(ind); DEBUG_DESTROY_NODE(addr); ret->gtOp1 = lclVar; // We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can // get rid of it now since the GT_RETURN node should never have // its address taken. assert((ret->gtFlags & GTF_DONT_CSE) == 0); lclVar->gtFlags &= ~GTF_DONT_CSE; return lclVar; } else if (!varDsc->lvDoNotEnregister) { lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } } } return ind; } #ifdef _PREFAST_ #pragma warning(pop) #endif GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) { genTreeOps oper = tree->gtOper; GenTree* op1 = tree->gtOp1; GenTree* op2 = tree->gtOp2; var_types typ = tree->TypeGet(); if (fgGlobalMorph && GenTree::OperIsCommutative(oper)) { /* Swap the operands so that the more expensive one is 'op1' */ if (tree->gtFlags & GTF_REVERSE_OPS) { tree->gtOp1 = op2; tree->gtOp2 = op1; op2 = op1; op1 = tree->gtOp1; tree->gtFlags &= ~GTF_REVERSE_OPS; } if (oper == op2->gtOper) { /* Reorder nested operators at the same precedence level to be left-recursive. For example, change "(a+(b+c))" to the equivalent expression "((a+b)+c)". 
*/ /* Things are handled differently for floating-point operators */ if (!varTypeIsFloating(tree->TypeGet())) { fgMoveOpsLeft(tree); op1 = tree->gtOp1; op2 = tree->gtOp2; } } } #if REARRANGE_ADDS /* Change "((x+icon)+y)" to "((x+y)+icon)" Don't reorder floating-point operations */ if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() && varTypeIsIntegralOrI(typ)) { GenTree* ad1 = op1->AsOp()->gtOp1; GenTree* ad2 = op1->AsOp()->gtOp2; if (!op2->OperIsConst() && ad2->OperIsConst()) { // This takes // + (tree) // / \. // / \. // / \. // + (op1) op2 // / \. // / \. // ad1 ad2 // // and it swaps ad2 and op2. // Don't create a byref pointer that may point outside of the ref object. // If a GC happens, the byref won't get updated. This can happen if one // of the int components is negative. It also requires the address generation // be in a fully-interruptible code region. if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet())) { tree->gtOp2 = ad2; op1->AsOp()->gtOp2 = op2; op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT; op2 = tree->gtOp2; } } } #endif /*------------------------------------------------------------------------- * Perform optional oper-specific postorder morphing */ switch (oper) { case GT_ASG: // Make sure we're allowed to do this. if (optValnumCSE_phase) { // It is not safe to reorder/delete CSE's break; } if (varTypeIsStruct(typ) && !tree->IsPhiDefn()) { if (tree->OperIsCopyBlkOp()) { return fgMorphCopyBlock(tree); } else { return fgMorphInitBlock(tree); } } if (typ == TYP_LONG) { break; } if (op2->gtFlags & GTF_ASG) { break; } if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT)) { break; } /* Special case: a cast that can be thrown away */ // TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only // one cast and sometimes there is another one after it that gets removed by this // code. fgMorphSmp should be improved to remove all redundant casts so this code // can be removed. 
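            // For example (illustrative), in "IND short = CAST(int <- short-typed src)" the store
            // writes only 16 bits, so the widening cast below can be discarded without changing
            // the stored value.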
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow()) { var_types srct; var_types cast; var_types dstt; srct = op2->AsCast()->CastOp()->TypeGet(); cast = (var_types)op2->CastToType(); dstt = op1->TypeGet(); /* Make sure these are all ints and precision is not lost */ if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT) { op2 = tree->gtOp2 = op2->AsCast()->CastOp(); } } break; case GT_MUL: /* Check for the case "(val + icon) * icon" */ if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD) { GenTree* add = op1->AsOp()->gtOp2; if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0)) { if (tree->gtOverflow() || op1->gtOverflow()) { break; } ssize_t imul = op2->AsIntCon()->gtIconVal; ssize_t iadd = add->AsIntCon()->gtIconVal; /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */ oper = GT_ADD; tree->ChangeOper(oper); op2->AsIntCon()->SetValueTruncating(iadd * imul); op1->ChangeOper(GT_MUL); add->AsIntCon()->SetIconValue(imul); } } break; case GT_DIV: /* For "val / 1", just return "val" */ if (op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(tree); return op1; } break; case GT_UDIV: case GT_UMOD: tree->CheckDivideByConstOptimized(this); break; case GT_LSH: /* Check for the case "(val + icon) << icon" */ if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow()) { GenTree* cns = op1->AsOp()->gtOp2; if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0)) { ssize_t ishf = op2->AsIntConCommon()->IconValue(); ssize_t iadd = cns->AsIntConCommon()->IconValue(); // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n"); /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */ tree->ChangeOper(GT_ADD); // we are reusing the shift amount node here, but the type we want is that of the shift result op2->gtType = op1->gtType; op2->AsIntConCommon()->SetValueTruncating(iadd << ishf); if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr && cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(cns->AsIntCon()->gtFieldSeq->m_next == nullptr); op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq; } op1->ChangeOper(GT_LSH); cns->AsIntConCommon()->SetIconValue(ishf); } } break; case GT_XOR: if (!optValnumCSE_phase) { /* "x ^ -1" is "~x" */ if (op2->IsIntegralConst(-1)) { tree->ChangeOper(GT_NOT); tree->gtOp2 = nullptr; DEBUG_DESTROY_NODE(op2); } else if (op2->IsIntegralConst(1) && op1->OperIsCompare()) { /* "binaryVal ^ 1" is "!binaryVal" */ gtReverseCond(op1); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); return op1; } } break; case GT_INIT_VAL: // Initialization values for initBlk have special semantics - their lower // byte is used to fill the struct. However, we allow 0 as a "bare" value, // which enables them to get a VNForZero, and be propagated. if (op1->IsIntegralConst(0)) { return op1; } break; default: break; } return tree; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree. // // Arguments: // multiOp - The tree to morph // // Return Value: // The fully morphed tree. 
// GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp) { gtUpdateNodeOperSideEffects(multiOp); bool dontCseConstArguments = false; #if defined(FEATURE_HW_INTRINSICS) // Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments if (multiOp->OperIs(GT_HWINTRINSIC)) { NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId(); #if defined(TARGET_XARCH) if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM) { dontCseConstArguments = true; } #elif defined(TARGET_ARMARCH) if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic)) { dontCseConstArguments = true; } #endif } #endif for (GenTree** use : multiOp->UseEdges()) { *use = fgMorphTree(*use); GenTree* operand = *use; multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT); if (dontCseConstArguments && operand->OperIsConst()) { operand->SetDoNotCSE(); } // Promoted structs after morph must be in one of two states: // a) Fully eliminated from the IR (independent promotion) OR only be // used by "special" nodes (e. g. LHS of ASGs for multi-reg structs). // b) Marked as do-not-enregister (dependent promotion). // // So here we preserve this invariant and mark any promoted structs as do-not-enreg. // if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted) { lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum() DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep)); } } #if defined(FEATURE_HW_INTRINSICS) if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC)) { GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic(); switch (hw->GetHWIntrinsicId()) { #if defined(TARGET_XARCH) case NI_SSE_Xor: case NI_SSE2_Xor: case NI_AVX_Xor: case NI_AVX2_Xor: { // Transform XOR(X, 0) to X for vectors GenTree* op1 = hw->Op(1); GenTree* op2 = hw->Op(2); if (!gtIsActiveCSE_Candidate(hw)) { if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op1); return op2; } if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op2); return op1; } } break; } #endif case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARMARCH) case NI_Vector64_Create: #endif { bool hwAllArgsAreConst = true; for (GenTree** use : multiOp->UseEdges()) { if (!(*use)->OperIsConst()) { hwAllArgsAreConst = false; break; } } // Avoid unexpected CSE for constant arguments for Vector_.Create // but only if all arguments are constants. if (hwAllArgsAreConst) { for (GenTree** use : multiOp->UseEdges()) { (*use)->SetDoNotCSE(); } } } break; default: break; } } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) #ifdef FEATURE_HW_INTRINSICS if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase) { return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic()); } #endif return multiOp; } #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b // (see ECMA III 3.55 and III.3.56). // // Arguments: // tree - The GT_MOD/GT_UMOD tree to morph // // Returns: // The morphed tree // // Notes: // For ARM64 we don't have a remainder instruction so this transform is // always done. For XARCH this transform is done if we know that magic // division will be used, in that case this transform allows CSE to // eliminate the redundant div from code like "x = a / 3; y = a % 3;". 
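//    For example, "a % 3" becomes "a - (a / 3) * 3"; with a == 17 that is 17 - 5 * 3 = 2.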
//
GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
{
    JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree));

    if (tree->OperGet() == GT_MOD)
    {
        tree->SetOper(GT_DIV);
    }
    else if (tree->OperGet() == GT_UMOD)
    {
        tree->SetOper(GT_UDIV);
    }
    else
    {
        noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv");
    }

    var_types type = tree->gtType;

    GenTree* const copyOfNumeratorValue   = fgMakeMultiUse(&tree->gtOp1);
    GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2);
    GenTree* const mul                    = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue);
    GenTree* const sub                    = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul);

    // Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul".
    //
    sub->gtFlags |= GTF_REVERSE_OPS;

#ifdef DEBUG
    sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif

    tree->CheckDivideByConstOptimized(this);

    return sub;
}

//------------------------------------------------------------------------------
// fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree.
//
//
// Arguments:
//    oper  - Operation to check
//
// Return Value:
//    True if the operation can be a root of a bitwise rotation tree; false otherwise.

bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper)
{
    return (oper == GT_OR) || (oper == GT_XOR);
}

//------------------------------------------------------------------------------
// fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return
//    an equivalent GT_ROL or GT_ROR tree; otherwise, return the original tree.
//
// Arguments:
//    tree  - tree to check for a rotation pattern
//
// Return Value:
//    An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise.
//
// Assumption:
//    The input is a GT_OR or a GT_XOR tree.

GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
    //
    // Check for a rotation pattern, e.g.,
    //
    //                         OR                      ROL
    //                      /      \                   / \.
    //                    LSH      RSZ      ->         x   y
    //                    / \      / \.
    //                   x  AND   x  AND
    //                      / \      / \.
    //                     y  31   ADD  31
    //                             / \.
    //                           NEG 32
    //                            |
    //                            y
    // The patterns recognized:
    // (x << (y & M)) op (x >>> ((-y + N) & M))
    // (x >>> ((-y + N) & M)) op (x << (y & M))
    //
    // (x << y) op (x >>> (-y + N))
    // (x >>> (-y + N)) op (x << y)
    //
    // (x >>> (y & M)) op (x << ((-y + N) & M))
    // (x << ((-y + N) & M)) op (x >>> (y & M))
    //
    // (x >>> y) op (x << (-y + N))
    // (x << (-y + N)) op (x >>> y)
    //
    // (x << c1) op (x >>> c2)
    // (x >>> c1) op (x << c2)
    //
    // where
    // c1 and c2 are const
    // c1 + c2 == bitsize(x)
    // N == bitsize(x)
    // M is const
    // M & (N - 1) == N - 1
    // op is either | or ^

    if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
    {
        // We can't do anything if the tree has assignments, calls, or volatile
        // reads. Note that we allow GTF_EXCEPT side effect since any exceptions
        // thrown by the original tree will be thrown by the transformed tree as well.
        return nullptr;
    }

    genTreeOps oper = tree->OperGet();
    assert(fgOperIsBitwiseRotationRoot(oper));

    // Check if we have an LSH on one side of the OR and an RSZ on the other side.
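    // For example (illustrative), OR(LSH(x, 3), RSZ(x, 29)) with a 32-bit x matches the constant
    // pattern below and is morphed into ROL(x, 3).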
GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); GenTree* leftShiftTree = nullptr; GenTree* rightShiftTree = nullptr; if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ)) { leftShiftTree = op1; rightShiftTree = op2; } else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH)) { leftShiftTree = op2; rightShiftTree = op1; } else { return nullptr; } // Check if the trees representing the value to shift are identical. // We already checked that there are no side effects above. if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1())) { GenTree* rotatedValue = leftShiftTree->gtGetOp1(); var_types rotatedValueActualType = genActualType(rotatedValue->gtType); ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8; noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64)); GenTree* leftShiftIndex = leftShiftTree->gtGetOp2(); GenTree* rightShiftIndex = rightShiftTree->gtGetOp2(); // The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits // shouldn't be masked for the transformation to be valid. If additional // higher bits are not masked, the transformation is still valid since the result // of MSIL shift instructions is unspecified if the shift amount is greater or equal // than the width of the value being shifted. ssize_t minimalMask = rotatedValueBitSize - 1; ssize_t leftShiftMask = -1; ssize_t rightShiftMask = -1; if ((leftShiftIndex->OperGet() == GT_AND)) { if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI()) { leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; leftShiftIndex = leftShiftIndex->gtGetOp1(); } else { return nullptr; } } if ((rightShiftIndex->OperGet() == GT_AND)) { if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI()) { rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; rightShiftIndex = rightShiftIndex->gtGetOp1(); } else { return nullptr; } } if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask)) { // The shift index is overmasked, e.g., we have // something like (x << y & 15) or // (x >> (32 - y) & 15 with 32 bit x. // The transformation is not valid. return nullptr; } GenTree* shiftIndexWithAdd = nullptr; GenTree* shiftIndexWithoutAdd = nullptr; genTreeOps rotateOp = GT_NONE; GenTree* rotateIndex = nullptr; if (leftShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = leftShiftIndex; shiftIndexWithoutAdd = rightShiftIndex; rotateOp = GT_ROR; } else if (rightShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = rightShiftIndex; shiftIndexWithoutAdd = leftShiftIndex; rotateOp = GT_ROL; } if (shiftIndexWithAdd != nullptr) { if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI()) { if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize) { if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG) { if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd)) { // We found one of these patterns: // (x << (y & M)) | (x >>> ((-y + N) & M)) // (x << y) | (x >>> (-y + N)) // (x >>> (y & M)) | (x << ((-y + N) & M)) // (x >>> y) | (x << (-y + N)) // where N == bitsize(x), M is const, and // M & (N - 1) == N - 1 CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) { // TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86. // GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need // to add helpers for GT_ROL and GT_ROR. 
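// (Illustrative note: on a 32-bit target a TYP_LONG value occupies a register
// pair, so a variable-count 64-bit rotate cannot be emitted as the single
// instruction that the 32-bit GT_ROL/GT_ROR cases can use.)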
return nullptr; } #endif rotateIndex = shiftIndexWithoutAdd; } } } } } else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI())) { if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize) { // We found this pattern: // (x << c1) | (x >>> c2) // where c1 and c2 are const and c1 + c2 == bitsize(x) rotateOp = GT_ROL; rotateIndex = leftShiftIndex; } } if (rotateIndex != nullptr) { noway_assert(GenTree::OperIsRotate(rotateOp)); GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT; // We can use the same tree only during global morph; reusing the tree in a later morph // may invalidate value numbers. if (fgGlobalMorph) { tree->AsOp()->gtOp1 = rotatedValue; tree->AsOp()->gtOp2 = rotateIndex; tree->ChangeOper(rotateOp); unsigned childFlags = 0; for (GenTree* op : tree->Operands()) { childFlags |= (op->gtFlags & GTF_ALL_EFFECT); } // The parent's flags should be a superset of its operands' flags noway_assert((inputTreeEffects & childFlags) == childFlags); } else { tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex); noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT)); } return tree; } } return nullptr; } #if !defined(TARGET_64BIT) //------------------------------------------------------------------------------ // fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands. // // Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap // operands if the first one is a constant and the second one is not, even for trees which // end up not being eligibile for long multiplication. // // Arguments: // mul - GT_MUL tree to check for a long multiplication opportunity // // Return Value: // The original tree, with operands possibly swapped, if it is not eligible for long multiplication. // Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is. // GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(mul->TypeIs(TYP_LONG)); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // "IsValidLongMul" and decomposition do not handle constant op1. if (op1->IsIntegralConst()) { std::swap(op1, op2); mul->gtOp1 = op1; mul->gtOp2 = op2; } if (!mul->IsValidLongMul()) { return mul; } // MUL_LONG needs to do the work the casts would have done. mul->ClearUnsigned(); if (op1->IsUnsigned()) { mul->SetUnsigned(); } // "IsValidLongMul" returned "true", so this GT_MUL cannot overflow. mul->ClearOverflow(); mul->Set64RsltMul(); return fgMorphLongMul(mul); } //------------------------------------------------------------------------------ // fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT. // // Morphs *only* the operands of casts that compose the long mul to // avoid them being folded aways. // // Arguments: // mul - GT_MUL tree to morph operands of // // Return Value: // The original tree, with operands morphed and flags propagated. // GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul) { INDEBUG(mul->DebugCheckLongMul()); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); // Morph the operands. We cannot allow the casts to go away, so we morph their operands directly. 
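// (Illustrative, hypothetical source: "long Mul(int a, int b) { return (long)a * (long)b; }"
// imports as GT_MUL(TYP_LONG, CAST<long>(int), CAST<long>(int)); preserving both
// casts here is what lets decomposition emit a single 32x32 -> 64 bit multiply.)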
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp()); op1->SetAllEffectsFlags(op1->AsCast()->CastOp()); if (op2->OperIs(GT_CAST)) { op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp()); op2->SetAllEffectsFlags(op2->AsCast()->CastOp()); } mul->SetAllEffectsFlags(op1, op2); op1->SetDoNotCSE(); op2->SetDoNotCSE(); return mul; } #endif // !defined(TARGET_64BIT) /***************************************************************************** * * Transform the given tree for code generation and return an equivalent tree. */ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) { assert(tree); #ifdef DEBUG if (verbose) { if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID) { noway_assert(!"JitBreakMorphTree hit"); } } #endif #ifdef DEBUG int thisMorphNum = 0; if (verbose && treesBeforeAfterMorph) { thisMorphNum = morphNum++; printf("\nfgMorphTree (before %d):\n", thisMorphNum); gtDispTree(tree); } #endif if (fgGlobalMorph) { // Apply any rewrites for implicit byref arguments before morphing the // tree. if (fgMorphImplicitByRefArgs(tree)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum); gtDispTree(tree); } #endif } } /*------------------------------------------------------------------------- * fgMorphTree() can potentially replace a tree with another, and the * caller has to store the return value correctly. * Turn this on to always make copy of "tree" here to shake out * hidden/unupdated references. */ #ifdef DEBUG if (compStressCompile(STRESS_GENERIC_CHECK, 0)) { GenTree* copy; if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL) { copy = gtNewLargeOperNode(GT_ADD, TYP_INT); } else { copy = new (this, GT_CALL) GenTreeCall(TYP_INT); } copy->ReplaceWith(tree, this); #if defined(LATE_DISASM) // GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle()) { copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle; } #endif DEBUG_DESTROY_NODE(tree); tree = copy; } #endif // DEBUG if (fgGlobalMorph) { /* Ensure that we haven't morphed this node already */ assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); /* Before morphing the tree, we try to propagate any active assertions */ if (optLocalAssertionProp) { /* Do we have any active assertions? */ if (optAssertionCount > 0) { GenTree* newTree = tree; while (newTree != nullptr) { tree = newTree; /* newTree is non-Null if we propagated an assertion */ newTree = optAssertionProp(apFull, tree, nullptr, nullptr); } assert(tree != nullptr); } } PREFAST_ASSUME(tree != nullptr); } /* Save the original un-morphed tree for fgMorphTreeDone */ GenTree* oldTree = tree; /* Figure out what kind of a node we have */ unsigned kind = tree->OperKind(); /* Is this a constant node? */ if (tree->OperIsConst()) { tree = fgMorphConst(tree); goto DONE; } /* Is this a leaf node? */ if (kind & GTK_LEAF) { tree = fgMorphLeaf(tree); goto DONE; } /* Is it a 'simple' unary/binary operator? 
*/ if (kind & GTK_SMPOP) { tree = fgMorphSmpOp(tree, mac); goto DONE; } /* See what kind of a special operator we have here */ switch (tree->OperGet()) { case GT_CALL: if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree = fgMorphCall(tree->AsCall()); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif tree = fgMorphMultiOp(tree->AsMultiOp()); break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) case GT_ARR_ELEM: tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj); unsigned dim; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]); } tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT; } if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_ARR_OFFSET: tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset); tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex); tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj); tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT; if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_PHI: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreePhi::Use& use : tree->AsPhi()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT; } break; case GT_FIELD_LIST: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } break; case GT_CMPXCHG: tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation); tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue); tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand); tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL); tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT; break; case GT_STORE_DYN_BLK: tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk()); break; default: #ifdef DEBUG gtDispTree(tree); #endif noway_assert(!"unexpected operator"); } DONE: fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum)); return tree; } //------------------------------------------------------------------------ // fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. 
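// For example (illustrative only): if assertion prop has recorded "V02 == 5",
// then morphing a subsequent assignment such as
//
//     V02 = x;   // GT_ASG(GT_LCL_VAR V02, ...)
//
// must come through here so the stale "V02 == 5" entry (and any copy assertion
// mentioning V02) is removed before later trees are morphed.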
// void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)) { /* All dependent assertions are killed here */ ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum)); if (killed) { AssertionIndex index = optAssertionCount; while (killed && (index > 0)) { if (BitVecOps::IsMember(apTraits, killed, index - 1)) { #ifdef DEBUG AssertionDsc* curAssertion = optGetAssertion(index); noway_assert((curAssertion->op1.lcl.lclNum == lclNum) || ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum))); if (verbose) { printf("\nThe assignment "); printTreeID(tree); printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum); optPrintAssertion(curAssertion); } #endif // Remove this bit from the killed mask BitVecOps::RemoveElemD(apTraits, killed, index - 1); optAssertionRemove(index); } index--; } // killed mask should now be zero noway_assert(BitVecOps::IsEmpty(apTraits, killed)); } } //------------------------------------------------------------------------ // fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum. // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. // // Notes: // For structs and struct fields, it will invalidate the children and parent // respectively. // Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar. // void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); // Kill the field locals. for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { fgKillDependentAssertionsSingle(i DEBUGARG(tree)); } // Kill the struct local itself. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } else if (varDsc->lvIsStructField) { // Kill the field local. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); // Kill the parent struct. fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree)); } else { fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } } /***************************************************************************** * * This function is called to complete the morphing of a tree node * It should only be called once for each node. * If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated, * to enforce the invariant that each node is only morphed once. * If local assertion prop is enabled the result tree may be replaced * by an equivalent tree. * */ void Compiler::fgMorphTreeDone(GenTree* tree, GenTree* oldTree /* == NULL */ DEBUGARG(int morphNum)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (after %d):\n", morphNum); gtDispTree(tree); printf(""); // in our logic this causes a flush } #endif if (!fgGlobalMorph) { return; } if ((oldTree != nullptr) && (oldTree != tree)) { /* Ensure that we have morphed this node */ assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!"); #ifdef DEBUG TransferTestDataToNode(oldTree, tree); #endif } else { // Ensure that we haven't morphed this node already assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); } if (tree->OperIsConst()) { goto DONE; } if (!optLocalAssertionProp) { goto DONE; } /* Do we have any active assertions? 
     */
    if (optAssertionCount > 0)
    {
        /* Is this an assignment to a local variable */
        GenTreeLclVarCommon* lclVarTree = nullptr;

        // The check below will miss LIR-style assignments.
        //
        // But we shouldn't be running local assertion prop on these,
        // as local prop gets disabled when we run global prop.
        assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));

        // DefinesLocal can return true for some BLK op uses, so
        // check what gets assigned only when we're at an assignment.
        if (tree->OperIs(GT_ASG) && tree->DefinesLocal(this, &lclVarTree))
        {
            unsigned lclNum = lclVarTree->GetLclNum();
            noway_assert(lclNum < lvaCount);
            fgKillDependentAssertions(lclNum DEBUGARG(tree));
        }
    }

    /* If this tree makes a new assertion - make it available */
    optAssertionGen(tree);

DONE:;

#ifdef DEBUG
    /* Mark this node as being morphed */
    tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}

//------------------------------------------------------------------------
// fgFoldConditional: try to fold conditionals and optimize BBJ_COND or
//   BBJ_SWITCH blocks.
//
// Arguments:
//    block - block to examine
//
// Returns:
//    FoldResult indicating what changes were made, if any
//
Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
{
    FoldResult result = FoldResult::FOLD_DID_NOTHING;

    // We don't want to make any code unreachable
    //
    if (opts.OptimizationDisabled())
    {
        return result;
    }

    if (block->bbJumpKind == BBJ_COND)
    {
        noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);

        Statement* lastStmt = block->lastStmt();

        noway_assert(lastStmt->GetNextStmt() == nullptr);

        if (lastStmt->GetRootNode()->gtOper == GT_CALL)
        {
            noway_assert(fgRemoveRestOfBlock);

            // Unconditional throw - transform the basic block into a BBJ_THROW
            //
            fgConvertBBToThrowBB(block);
            result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;

            JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
            JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);

            return result;
        }

        noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE);

        /* Did we fold the conditional */

        noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
        GenTree* condTree;
        condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
        GenTree* cond;
        cond = condTree->gtEffectiveVal(true);

        if (cond->OperIsConst())
        {
            /* Yupee - we folded the conditional!
             * Remove the conditional statement */

            noway_assert(cond->gtOper == GT_CNS_INT);
            noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));

            if (condTree != cond)
            {
                // Preserve any side effects
                assert(condTree->OperIs(GT_COMMA));
                lastStmt->SetRootNode(condTree);
                result = FoldResult::FOLD_ALTERED_LAST_STMT;
            }
            else
            {
                // no side effects, remove the jump entirely
                fgRemoveStmt(block, lastStmt);
                result = FoldResult::FOLD_REMOVED_LAST_STMT;
            }

            // block is a BBJ_COND that we are folding the conditional for.
            // bTaken is the path that will always be taken from block.
            // bNotTaken is the path that will never be taken from block.
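            //
            // For example (illustrative): if earlier folding rewrote the compare in
            // "JTRUE(GT_EQ(V02, 0))" down to the constant "JTRUE(1)", the code below
            // turns this BBJ_COND block into BBJ_ALWAYS and unhooks the never-taken edge.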
// BasicBlock* bTaken; BasicBlock* bNotTaken; if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; bTaken = block->bbJumpDest; bNotTaken = block->bbNext; } else { /* Unmark the loop if we are removing a backwards branch */ /* dest block must also be marked as a loop head and */ /* We must be able to reach the backedge block */ if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) && fgReachable(block->bbJumpDest, block)) { optUnmarkLoopBlocks(block->bbJumpDest, block); } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; bTaken = block->bbNext; bNotTaken = block->bbJumpDest; } if (fgHaveValidEdgeWeights) { // We are removing an edge from block to bNotTaken // and we have already computed the edge weights, so // we will try to adjust some of the weights // flowList* edgeTaken = fgGetPredForBlock(bTaken, block); BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block // We examine the taken edge (block -> bTaken) // if block has valid profile weight and bTaken does not we try to adjust bTaken's weight // else if bTaken has valid profile weight and block does not we try to adjust block's weight // We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken // if (block->hasProfileWeight()) { // The edge weights for (block -> bTaken) are 100% of block's weight edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken); if (!bTaken->hasProfileWeight()) { if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight)) { // Update the weight of bTaken bTaken->inheritWeight(block); bUpdated = bTaken; } } } else if (bTaken->hasProfileWeight()) { if (bTaken->countOfInEdges() == 1) { // There is only one in edge to bTaken edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken); // Update the weight of block block->inheritWeight(bTaken); bUpdated = block; } } if (bUpdated != nullptr) { weight_t newMinWeight; weight_t newMaxWeight; flowList* edge; // Now fix the weights of the edges out of 'bUpdated' switch (bUpdated->bbJumpKind) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; case BBJ_COND: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); FALLTHROUGH; case BBJ_ALWAYS: edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; default: // We don't handle BBJ_SWITCH break; } } } /* modify the flow graph */ /* Remove 'block' from the predecessor list of 'bNotTaken' */ fgRemoveRefPred(bNotTaken, block); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif /* if the block was a loop condition we may have to modify * the loop table */ for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* We are only interested in the loop bottom */ if (optLoopTable[loopNum].lpBottom == block) { if (cond->AsIntCon()->gtIconVal == 0) { /* This was a bogus loop (condition always false) * Remove the loop from the table */ optMarkLoopRemoved(loopNum); optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop")); #ifdef DEBUG if (verbose) { printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum, optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum); } #endif } } } } } else if (block->bbJumpKind == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the switch entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } /* modify the flow graph */ /* Find the actual jump target */ unsigned switchVal; switchVal = (unsigned)cond->AsIntCon()->gtIconVal; unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab; bool foundVal; foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { BasicBlock* curJump = *jumpTab; assert(curJump->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { if (curJump != block->bbNext) { /* transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = curJump; } else { /* transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; } foundVal = true; } else { /* Remove 'block' from the predecessor list of 'curJump' */ fgRemoveRefPred(curJump, block); } } assert(foundVal); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif } } return result; } //------------------------------------------------------------------------ // fgMorphBlockStmt: morph a single statement in a block. // // Arguments: // block - block containing the statement // stmt - statement to morph // msg - string to identify caller in a dump // // Returns: // true if 'stmt' was removed from the block. // s false if 'stmt' is still in the block (even if other statements were removed). // // Notes: // Can be called anytime, unlike fgMorphStmts() which should only be called once. // bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)) { assert(block != nullptr); assert(stmt != nullptr); // Reset some ambient state fgRemoveRestOfBlock = false; compCurBB = block; compCurStmt = stmt; GenTree* morph = fgMorphTree(stmt->GetRootNode()); // Bug 1106830 - During the CSE phase we can't just remove // morph->AsOp()->gtOp2 as it could contain CSE expressions. // This leads to a noway_assert in OptCSE.cpp when // searching for the removed CSE ref. (using gtFindLink) // if (!optValnumCSE_phase) { // Check for morph as a GT_COMMA with an unconditional throw if (fgIsCommaThrow(morph, true)) { #ifdef DEBUG if (verbose) { printf("Folding a top-level fgIsCommaThrow stmt\n"); printf("Removing op2 as unreachable:\n"); gtDispTree(morph->AsOp()->gtOp2); printf("\n"); } #endif // Use the call as the new stmt morph = morph->AsOp()->gtOp1; noway_assert(morph->gtOper == GT_CALL); } // we can get a throw as a statement root if (fgIsThrow(morph)) { #ifdef DEBUG if (verbose) { printf("We have a top-level fgIsThrow stmt\n"); printf("Removing the rest of block as unreachable:\n"); } #endif noway_assert((morph->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } } stmt->SetRootNode(morph); // Can the entire tree be removed? bool removedStmt = false; // Defer removing statements during CSE so we don't inadvertently remove any CSE defs. if (!optValnumCSE_phase) { removedStmt = fgCheckRemoveStmt(block, stmt); } // Or this is the last statement of a conditional branch that was just folded? if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock) { FoldResult const fr = fgFoldConditional(block); removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT); } if (!removedStmt) { // Have to re-do the evaluation order since for example some later code does not expect constants as op1 gtSetStmtInfo(stmt); // Have to re-link the nodes for this statement fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed")); gtDispTree(morph); printf("\n"); } #endif if (fgRemoveRestOfBlock) { // Remove the rest of the stmts in the block for (Statement* removeStmt : StatementList(stmt->GetNextStmt())) { fgRemoveStmt(block, removeStmt); } // The rest of block has been removed and we will always throw an exception. // // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE. // We should not convert it to a ThrowBB. 
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0)) { // Convert block to a throw bb fgConvertBBToThrowBB(block); } #ifdef DEBUG if (verbose) { printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum); } #endif fgRemoveRestOfBlock = false; } return removedStmt; } /***************************************************************************** * * Morph the statements of the given block. * This function should be called just once for a block. Use fgMorphBlockStmt() * for reentrant calls. */ void Compiler::fgMorphStmts(BasicBlock* block) { fgRemoveRestOfBlock = false; fgCurrentlyInUseArgTemps = hashBv::Create(this); for (Statement* const stmt : block->Statements()) { if (fgRemoveRestOfBlock) { fgRemoveStmt(block, stmt); continue; } #ifdef FEATURE_SIMD if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT && stmt->GetRootNode()->OperGet() == GT_ASG) { fgMorphCombineSIMDFieldAssignments(block, stmt); } #endif fgMorphStmt = stmt; compCurStmt = stmt; GenTree* oldTree = stmt->GetRootNode(); #ifdef DEBUG unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0); if (verbose) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID()); gtDispTree(oldTree); } #endif /* Morph this statement tree */ GenTree* morphedTree = fgMorphTree(oldTree); // mark any outgoing arg temps as free so we can reuse them in the next statement. fgCurrentlyInUseArgTemps->ZeroAll(); // Has fgMorphStmt been sneakily changed ? if ((stmt->GetRootNode() != oldTree) || (block != compCurBB)) { if (stmt->GetRootNode() != oldTree) { /* This must be tailcall. Ignore 'morphedTree' and carry on with the tail-call node */ morphedTree = stmt->GetRootNode(); } else { /* This must be a tailcall that caused a GCPoll to get injected. We haven't actually morphed the call yet but the flag still got set, clear it here... */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif } noway_assert(compTailCallUsed); noway_assert(morphedTree->gtOper == GT_CALL); GenTreeCall* call = morphedTree->AsCall(); // Could be // - a fast call made as jmp in which case block will be ending with // BBJ_RETURN (as we need epilog) and marked as containing a jmp. // - a tailcall dispatched via JIT helper, on x86, in which case // block will be ending with BBJ_THROW. // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); } #ifdef DEBUG if (compStressCompile(STRESS_CLONE_EXPR, 30)) { // Clone all the trees to stress gtCloneExpr() if (verbose) { printf("\nfgMorphTree (stressClone from):\n"); gtDispTree(morphedTree); } morphedTree = gtCloneExpr(morphedTree); noway_assert(morphedTree != nullptr); if (verbose) { printf("\nfgMorphTree (stressClone to):\n"); gtDispTree(morphedTree); } } /* If the hash value changes. 
we modified the tree during morphing */ if (verbose) { unsigned newHash = gtHashValue(morphedTree); if (newHash != oldHash) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID()); gtDispTree(morphedTree); } } #endif /* Check for morphedTree as a GT_COMMA with an unconditional throw */ if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true)) { /* Use the call as the new stmt */ morphedTree = morphedTree->AsOp()->gtOp1; noway_assert(morphedTree->gtOper == GT_CALL); noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } stmt->SetRootNode(morphedTree); if (fgRemoveRestOfBlock) { continue; } /* Has the statement been optimized away */ if (fgCheckRemoveStmt(block, stmt)) { continue; } /* Check if this block ends with a conditional branch that can be folded */ if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING) { continue; } if (ehBlockHasExnFlowDsc(block)) { continue; } } if (fgRemoveRestOfBlock) { if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; if (op1->OperIsCompare()) { /* Unmark the comparison node with GTF_RELOP_JMP_USED */ op1->gtFlags &= ~GTF_RELOP_JMP_USED; } lastStmt->SetRootNode(fgMorphTree(op1)); } } /* Mark block as a BBJ_THROW block */ fgConvertBBToThrowBB(block); } #if FEATURE_FASTTAILCALL GenTree* recursiveTailCall = nullptr; if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall)) { fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall()); } #endif // Reset this back so that it doesn't leak out impacting other blocks fgRemoveRestOfBlock = false; } /***************************************************************************** * * Morph the blocks of the method. * Returns true if the basic block list is modified. * This function should be called just once. */ void Compiler::fgMorphBlocks() { #ifdef DEBUG if (verbose) { printf("\n*************** In fgMorphBlocks()\n"); } #endif /* Since fgMorphTree can be called after various optimizations to re-arrange * the nodes we need a global flag to signal if we are during the one-pass * global morphing */ fgGlobalMorph = true; // // Local assertion prop is enabled if we are optimized // optLocalAssertionProp = opts.OptimizationEnabled(); if (optLocalAssertionProp) { // // Initialize for local assertion prop // optAssertionInit(true); } if (!compEnregLocals()) { // Morph is checking if lvDoNotEnregister is already set for some optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // this flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. 
lvSetMinOptsDoNotEnreg(); } /*------------------------------------------------------------------------- * Process all basic blocks in the function */ BasicBlock* block = fgFirstBB; noway_assert(block); do { #ifdef DEBUG if (verbose) { printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName); } #endif if (optLocalAssertionProp) { // // Clear out any currently recorded assertion candidates // before processing each basic block, // also we must handle QMARK-COLON specially // optAssertionReset(0); } // Make the current basic block address available globally. compCurBB = block; // Process all statement trees in the basic block. fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { fgMergeBlockReturn(block); } } block = block->bbNext; } while (block != nullptr); // We are done with the global morphing phase fgGlobalMorph = false; compCurBB = nullptr; // Under OSR, we no longer need to specially protect the original method entry // if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED)) { JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum); assert(fgEntryBB->bbRefs > 0); fgEntryBB->bbRefs--; // We don't need to remember this block anymore. fgEntryBB = nullptr; } #ifdef DEBUG if (verboseTrees) { fgDispBasicBlocks(true); } #endif } //------------------------------------------------------------------------ // fgMergeBlockReturn: assign the block return value (if any) into the single return temp // and branch to the single return block. // // Arguments: // block - the block to process. // // Notes: // A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN. // For example a method returning void could have an empty block with jump kind BBJ_RETURN. // Such blocks do materialize as part of in-lining. // // A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN. // It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC. // For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal // is BAD_VAR_NUM. // void Compiler::fgMergeBlockReturn(BasicBlock* block) { assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. Statement* lastStmt = block->lastStmt(); GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr; if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0)) { // This return was generated during epilog merging, so leave it alone } else { // We'll jump to the genReturnBB. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) { fgConvertSyncReturnToLeave(block); } else #endif // !TARGET_X86 { block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; } if (genReturnLocal != BAD_VAR_NUM) { // replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal. // Method must be returning a value other than TYP_VOID. 
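        // (Illustrative sketch: with merged returns, a block that ended in
        //     GT_RETURN(expr)
        // is rewritten so that this block instead contains
        //     GT_ASG(GT_LCL_VAR V<genReturnLocal>, expr)
        // and jumps to genReturnBB, which performs the single real GT_RETURN
        // for the method.)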
noway_assert(compMethodHasRetVal()); // This block must be ending with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); noway_assert(ret != nullptr); // GT_RETURN must have non-null operand as the method is returning the value assigned to // genReturnLocal noway_assert(ret->OperGet() == GT_RETURN); noway_assert(ret->gtGetOp1() != nullptr); Statement* pAfterStatement = lastStmt; const DebugInfo& di = lastStmt->GetDebugInfo(); GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block); if (tree->OperIsCopyBlkOp()) { tree = fgMorphCopyBlock(tree); } else if (tree->OperIsInitBlkOp()) { tree = fgMorphInitBlock(tree); } if (pAfterStatement == lastStmt) { lastStmt->SetRootNode(tree); } else { // gtNewTempAssign inserted additional statements after last fgRemoveStmt(block, lastStmt); Statement* newStmt = gtNewStmt(tree, di); fgInsertStmtAfter(block, pAfterStatement, newStmt); lastStmt = newStmt; } } else if (ret != nullptr && ret->OperGet() == GT_RETURN) { // This block ends with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); // Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn // block noway_assert(ret->TypeGet() == TYP_VOID); noway_assert(ret->gtGetOp1() == nullptr); fgRemoveStmt(block, lastStmt); } JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum); DISPBLOCK(block); if (block->hasProfileWeight()) { weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT; weight_t const newWeight = oldWeight + block->bbWeight; JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight, block->bbNum, genReturnBB->bbNum); genReturnBB->setBBProfileWeight(newWeight); DISPBLOCK(genReturnBB); } } } /***************************************************************************** * * Make some decisions about the kind of code to generate. */ void Compiler::fgSetOptions() { #ifdef DEBUG /* Should we force fully interruptible code ? */ if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30)) { noway_assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); } #endif if (opts.compDbgCode) { assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); // debugging is easier this way ... } /* Assume we won't need an explicit stack frame if this is allowed */ if (compLocallocUsed) { codeGen->setFramePointerRequired(true); } #ifdef TARGET_X86 if (compTailCallUsed) codeGen->setFramePointerRequired(true); #endif // TARGET_X86 if (!opts.genFPopt) { codeGen->setFramePointerRequired(true); } // Assert that the EH table has been initialized by now. Note that // compHndBBtabAllocCount never decreases; it is a high-water mark // of table allocation. In contrast, compHndBBtabCount does shrink // if we delete a dead EH region, and if it shrinks to zero, the // table pointer compHndBBtab is unreliable. assert(compHndBBtabAllocCount >= info.compXcptnsCount); #ifdef TARGET_X86 // Note: this case, and the !X86 case below, should both use the // !X86 path. This would require a few more changes for X86 to use // compHndBBtabCount (the current number of EH clauses) instead of // info.compXcptnsCount (the number of EH clauses in IL), such as // in ehNeedsShadowSPslots(). 
    // This is because sometimes the IL has
    // an EH clause that we delete as statically dead code before we
    // get here, leaving no EH clauses left, and thus no requirement
    // to use a frame pointer because of EH. But until all the code uses
    // the same test, leave info.compXcptnsCount here.
    if (info.compXcptnsCount > 0)
    {
        codeGen->setFramePointerRequiredEH(true);
    }

#else // !TARGET_X86

    if (compHndBBtabCount > 0)
    {
        codeGen->setFramePointerRequiredEH(true);
    }

#endif // TARGET_X86

#ifdef UNIX_X86_ABI
    if (info.compXcptnsCount > 0)
    {
        assert(!codeGen->isGCTypeFixed());

        // Enforce fully interruptible codegen for funclet unwinding
        SetInterruptible(true);
    }
#endif // UNIX_X86_ABI

    if (compMethodRequiresPInvokeFrame())
    {
        codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
    }

    if (info.compPublishStubParam)
    {
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    if (compIsProfilerHookNeeded())
    {
        codeGen->setFramePointerRequired(true);
    }

    if (info.compIsVarArgs)
    {
        // Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative.
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    if (lvaReportParamTypeArg())
    {
        codeGen->setFramePointerRequiredGCInfo(true);
    }

    // printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not");
}

/*****************************************************************************/

GenTree* Compiler::fgInitThisClass()
{
    noway_assert(!compIsForInlining());

    CORINFO_LOOKUP_KIND kind;
    info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);

    if (!kind.needsRuntimeLookup)
    {
        return fgGetSharedCCtor(info.compClassHnd);
    }
    else
    {
#ifdef FEATURE_READYTORUN
        // Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR.
        if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI))
        {
            CORINFO_RESOLVED_TOKEN resolvedToken;
            memset(&resolvedToken, 0, sizeof(resolvedToken));

            // We are in a shared method body, but maybe we don't need a runtime lookup after all.
            // This covers the case of a generic method on a non-generic type.
            if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST))
            {
                resolvedToken.hClass = info.compClassHnd;
                return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
            }

            // We need a runtime lookup.
            GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);

            // CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static
            // base of the class that owns the method being compiled". If we're in this method, it means we're not
            // inlining and there's no ambiguity.
            return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF,
                                             gtNewCallArgs(ctxTree), &kind);
        }
#endif

        // Collectible types require that for shared generic code, if we use the generic context parameter,
        // we report it. (This is a conservative approach, we could detect some cases particularly when the
        // context parameter is "this" that we don't need the eager reporting logic.)
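        //
        // (Illustrative, hypothetical example: for the shared instantiation
        // "C<__Canon>.M()", the CORINFO_LOOKUP_CLASSPARAM case below produces
        // roughly "CALL CORINFO_HELP_INITCLASS(GT_LCL_VAR typeCtxtArg)", using
        // the hidden class-handle argument as the context.)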
lvaGenericsContextInUse = true; switch (kind.runtimeLookupKind) { case CORINFO_LOOKUP_THISOBJ: { // This code takes a this pointer; but we need to pass the static method desc to get the right point in // the hierarchy GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF); vtTree->gtFlags |= GTF_VAR_CONTEXT; // Vtable pointer of this object vtTree = gtNewMethodTableLookup(vtTree); GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd); return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd)); } case CORINFO_LOOKUP_CLASSPARAM: { GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); vtTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree)); } case CORINFO_LOOKUP_METHODPARAM: { GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); methHndTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(gtNewIconNode(0), methHndTree)); } default: noway_assert(!"Unknown LOOKUP_KIND"); UNREACHABLE(); } } } #ifdef DEBUG /***************************************************************************** * * Tree walk callback to make sure no GT_QMARK nodes are present in the tree, * except for the allowed ? 1 : 0; pattern. */ Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data) { if ((*tree)->OperGet() == GT_QMARK) { fgCheckQmarkAllowedForm(*tree); } return WALK_CONTINUE; } void Compiler::fgCheckQmarkAllowedForm(GenTree* tree) { assert(tree->OperGet() == GT_QMARK); assert(!"Qmarks beyond morph disallowed."); } /***************************************************************************** * * Verify that the importer has created GT_QMARK nodes in a way we can * process them. The following is allowed: * * 1. A top level qmark. Top level qmark is of the form: * a) (bool) ? (void) : (void) OR * b) V0N = (bool) ? (type) : (type) * * 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child * of either op1 of colon or op2 of colon but not a child of any other * operator. */ void Compiler::fgPreExpandQmarkChecks(GenTree* expr) { GenTree* topQmark = fgGetTopLevelQmark(expr); // If the top level Qmark is null, then scan the tree to make sure // there are no qmarks within it. if (topQmark == nullptr) { fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } else { // We could probably expand the cond node also, but don't think the extra effort is necessary, // so let's just assert the cond node of a top level qmark doesn't have further top level qmarks. fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2); } } #endif // DEBUG /***************************************************************************** * * Get the top level GT_QMARK node in a given "expr", return NULL if such a * node is not present. If the top level GT_QMARK node is assigned to a * GT_LCL_VAR, then return the lcl node in ppDst. 
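 *    For example (illustrative): given the morphed statement
 *        GT_ASG(GT_LCL_VAR V05, GT_QMARK(cond, GT_COLON(thenExpr, elseExpr)))
 *    this returns the GT_QMARK node and sets "*ppDst" to the V05 GT_LCL_VAR node.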
 *
 */

GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */)
{
    if (ppDst != nullptr)
    {
        *ppDst = nullptr;
    }

    GenTree* topQmark = nullptr;

    if (expr->gtOper == GT_QMARK)
    {
        topQmark = expr;
    }
    else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK &&
             expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
    {
        topQmark = expr->AsOp()->gtOp2;

        if (ppDst != nullptr)
        {
            *ppDst = expr->AsOp()->gtOp1;
        }
    }

    return topQmark;
}

/*********************************************************************************
 *
 *  For a castclass helper call,
 *  Importer creates the following tree:
 *      tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper());
 *
 *  This method splits the qmark expression created by the importer into the
 *  following blocks: (block, asg, cond1, cond2, helper, remainder)
 *  Notice that op1 is the result for both the conditions. So we coalesce these
 *  assignments into a single block instead of two blocks resulting in a nested diamond.
 *
 *                       +---------->-----------+
 *                       |          |           |
 *                       ^          ^           v
 *                       |          |           |
 *  block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder
 *
 *  We expect to achieve the following codegen:
 *     mov      rsi, rdx                           tmp = op1                      // asgBlock
 *     test     rsi, rsi                           goto skip if tmp == null ?     // cond1Block
 *     je       SKIP
 *     mov      rcx, 0x76543210                    cns = op2                      // cond2Block
 *     cmp      qword ptr [rsi], rcx               goto skip if *tmp == op2
 *     je       SKIP
 *     call     CORINFO_HELP_CHKCASTCLASS_SPECIAL  tmp = helper(cns, tmp)         // helperBlock
 *     mov      rsi, rax
 *  SKIP:                                                                         // remainderBlock
 *     tmp has the result.
 *
 */
void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum);
        fgDispBasicBlocks(block, block, true);
    }
#endif // DEBUG

    GenTree* expr = stmt->GetRootNode();

    GenTree* dst   = nullptr;
    GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
    noway_assert(dst != nullptr);

    assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF);

    // Get cond, true, false exprs for the qmark.
    GenTree* condExpr  = qmark->gtGetOp1();
    GenTree* trueExpr  = qmark->gtGetOp2()->AsColon()->ThenNode();
    GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();

    // Get cond, true, false exprs for the nested qmark.
    GenTree* nestedQmark = falseExpr;
    GenTree* cond2Expr;
    GenTree* true2Expr;
    GenTree* false2Expr;

    if (nestedQmark->gtOper == GT_QMARK)
    {
        cond2Expr  = nestedQmark->gtGetOp1();
        true2Expr  = nestedQmark->gtGetOp2()->AsColon()->ThenNode();
        false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode();
    }
    else
    {
        // This is a rare case that arises when we are doing minopts and encounter isinst of null.
        // gtFoldExpr was still able to optimize away part of the tree (but not all).
        // That means it does not match our pattern.
        // Rather than write code to handle this case, just fake up some nodes to make it match the common
        // case. Synthesize a comparison that is always true, and for the result-on-true, use the
        // entire subtree we expected to be the nested question op.
        cond2Expr  = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL));
        true2Expr  = nestedQmark;
        false2Expr = gtNewIconNode(0, TYP_I_IMPL);
    }
    assert(false2Expr->OperGet() == trueExpr->OperGet());

    // Create the chain of blocks. See method header comment.
    // The order of blocks after this is the following:
    //     block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ...
remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true); BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true); remainderBlock->bbFlags |= propagateFlags; // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. if ((block->bbFlags & BBF_INTERNAL) == 0) { helperBlock->bbFlags &= ~BBF_INTERNAL; cond2Block->bbFlags &= ~BBF_INTERNAL; cond1Block->bbFlags &= ~BBF_INTERNAL; asgBlock->bbFlags &= ~BBF_INTERNAL; helperBlock->bbFlags |= BBF_IMPORTED; cond2Block->bbFlags |= BBF_IMPORTED; cond1Block->bbFlags |= BBF_IMPORTED; asgBlock->bbFlags |= BBF_IMPORTED; } // Chain the flow correctly. fgAddRefPred(asgBlock, block); fgAddRefPred(cond1Block, asgBlock); fgAddRefPred(cond2Block, cond1Block); fgAddRefPred(helperBlock, cond2Block); fgAddRefPred(remainderBlock, helperBlock); fgAddRefPred(remainderBlock, cond1Block); fgAddRefPred(remainderBlock, cond2Block); cond1Block->bbJumpDest = remainderBlock; cond2Block->bbJumpDest = remainderBlock; // Set the weights; some are guesses. asgBlock->inheritWeight(block); cond1Block->inheritWeight(block); cond2Block->inheritWeightPercentage(cond1Block, 50); helperBlock->inheritWeightPercentage(cond2Block, 50); // Append cond1 as JTRUE to cond1Block GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond1Block, jmpStmt); // Append cond2 as JTRUE to cond2Block jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr); jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond2Block, jmpStmt); // AsgBlock should get tmp = op1 assignment. trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr); Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(asgBlock, trueStmt); // Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper. gtReverseCond(cond2Expr); GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr); Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(helperBlock, helperStmt); // Finally remove the nested qmark stmt. fgRemoveStmt(block, stmt); if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN)) { fgConvertBBToThrowBB(helperBlock); } #ifdef DEBUG if (verbose) { printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand a statement with a top level qmark node. 
There are three cases, based * on whether the qmark has both "true" and "false" arms, or just one of them. * * S0; * C ? T : F; * S1; * * Generates ===> * * bbj_always * +---->------+ * false | | * S0 -->-- ~C -->-- T F -->-- S1 * | | * +--->--------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? T : NOP; * S1; * * Generates ===> * * false * S0 -->-- ~C -->-- T -->-- S1 * | | * +-->-------------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? NOP : F; * S1; * * Generates ===> * * false * S0 -->-- C -->-- F -->-- S1 * | | * +-->------------+ * bbj_cond(true) * * If the qmark assigns to a variable, then create tmps for "then" * and "else" results and assign the temp to the variable as a writeback step. */ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) { GenTree* expr = stmt->GetRootNode(); // Retrieve the Qmark node to be expanded. GenTree* dst = nullptr; GenTree* qmark = fgGetTopLevelQmark(expr, &dst); if (qmark == nullptr) { return; } if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF) { fgExpandQmarkForCastInstOf(block, stmt); return; } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum); fgDispBasicBlocks(block, block, true); } #endif // DEBUG // Retrieve the operands. GenTree* condExpr = qmark->gtGetOp1(); GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode(); GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode(); assert(!varTypeIsFloating(condExpr->TypeGet())); bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP); bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP); assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark! // Create remainder, cond and "else" blocks. After this, the blocks are in this order: // block ... condBlock ... elseBlock ... remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true); BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true); // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. 
if ((block->bbFlags & BBF_INTERNAL) == 0) { condBlock->bbFlags &= ~BBF_INTERNAL; elseBlock->bbFlags &= ~BBF_INTERNAL; condBlock->bbFlags |= BBF_IMPORTED; elseBlock->bbFlags |= BBF_IMPORTED; } remainderBlock->bbFlags |= propagateFlags; condBlock->inheritWeight(block); fgAddRefPred(condBlock, block); fgAddRefPred(elseBlock, condBlock); fgAddRefPred(remainderBlock, elseBlock); BasicBlock* thenBlock = nullptr; if (hasTrueExpr && hasFalseExpr) { // bbj_always // +---->------+ // false | | // S0 -->-- ~C -->-- T F -->-- S1 // | | // +--->--------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = elseBlock; thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->bbJumpDest = remainderBlock; if ((block->bbFlags & BBF_INTERNAL) == 0) { thenBlock->bbFlags &= ~BBF_INTERNAL; thenBlock->bbFlags |= BBF_IMPORTED; } fgAddRefPred(thenBlock, condBlock); fgAddRefPred(remainderBlock, thenBlock); thenBlock->inheritWeightPercentage(condBlock, 50); elseBlock->inheritWeightPercentage(condBlock, 50); } else if (hasTrueExpr) { // false // S0 -->-- ~C -->-- T -->-- S1 // | | // +-->-------------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; thenBlock->inheritWeightPercentage(condBlock, 50); } else if (hasFalseExpr) { // false // S0 -->-- C -->-- F -->-- S1 // | | // +-->------------+ // bbj_cond(true) // condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); elseBlock->inheritWeightPercentage(condBlock, 50); } GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1()); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(condBlock, jmpStmt); // Remove the original qmark statement. fgRemoveStmt(block, stmt); // Since we have top level qmarks, we either have a dst for it in which case // we need to create tmps for true and falseExprs, else just don't bother // assigning. unsigned lclNum = BAD_VAR_NUM; if (dst != nullptr) { assert(dst->gtOper == GT_LCL_VAR); lclNum = dst->AsLclVar()->GetLclNum(); } else { assert(qmark->TypeGet() == TYP_VOID); } if (hasTrueExpr) { if (dst != nullptr) { trueExpr = gtNewTempAssign(lclNum, trueExpr); } Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(thenBlock, trueStmt); } // Assign the falseExpr into the dst or tmp, insert in elseBlock if (hasFalseExpr) { if (dst != nullptr) { falseExpr = gtNewTempAssign(lclNum, falseExpr); } Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(elseBlock, falseStmt); } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand GT_QMARK nodes from the flow graph into basic blocks. * */ void Compiler::fgExpandQmarkNodes() { if (compQmarkUsed) { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); #ifdef DEBUG fgPreExpandQmarkChecks(expr); #endif fgExpandQmarkStmt(block, stmt); } } #ifdef DEBUG fgPostExpandQmarkChecks(); #endif } compQmarkRationalized = true; } #ifdef DEBUG /***************************************************************************** * * Make sure we don't have any more GT_QMARK nodes. 
* */ void Compiler::fgPostExpandQmarkChecks() { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } } } #endif /***************************************************************************** * * Promoting struct locals */ void Compiler::fgPromoteStructs() { #ifdef DEBUG if (verbose) { printf("*************** In fgPromoteStructs()\n"); } #endif // DEBUG if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE)) { JITDUMP(" promotion opt flag not enabled\n"); return; } if (fgNoStructPromotion) { JITDUMP(" promotion disabled by JitNoStructPromotion\n"); return; } #if 0 // The code in this #if has been useful in debugging struct promotion issues, by // enabling selective enablement of the struct promotion optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("structpromohashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); } char* histr = getenv("structpromohashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); } if (methHash < methHashLo || methHash > methHashHi) { return; } else { printf("Promoting structs for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // in our logic this causes a flush } #endif // DEBUG #endif // 0 if (info.compIsVarArgs) { JITDUMP(" promotion disabled because of varargs\n"); return; } #ifdef DEBUG if (verbose) { printf("\nlvaTable before fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = lvaCount; // // Loop through the original lvaTable. Looking for struct locals to be promoted. // lvaStructPromotionInfo structPromotionInfo; bool tooManyLocalsReported = false; // Clear the structPromotionHelper, since it is used during inlining, at which point it // may be conservative about looking up SIMD info. // We don't want to preserve those conservative decisions for the actual struct promotion. structPromotionHelper->Clear(); for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { // Whether this var got promoted bool promotedVar = false; LclVarDsc* varDsc = lvaGetDesc(lclNum); // If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote // its fields. Instead, we will attempt to enregister the entire struct. if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc))) { varDsc->lvRegStruct = true; } // Don't promote if we have reached the tracking limit. else if (lvaHaveManyLocals()) { // Print the message first time when we detected this condition if (!tooManyLocalsReported) { JITDUMP("Stopped promoting struct fields, due to too many locals.\n"); } tooManyLocalsReported = true; } else if (varTypeIsStruct(varDsc)) { assert(structPromotionHelper != nullptr); promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum); } if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed) { // Even if we have not used this in a SIMD intrinsic, if it is not being promoted, // we will treat it as a reg struct. 
varDsc->lvRegStruct = true; } } #ifdef DEBUG if (verbose) { printf("\nlvaTable after fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG } void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_FIELD); GenTreeField* field = tree->AsField(); GenTree* objRef = field->GetFldObj(); GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr; noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))); /* Is this an instance data member? */ if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)) { unsigned lclNum = obj->AsLclVarCommon()->GetLclNum(); const LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(obj)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = field->gtFldOffset; unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); if (fieldLclIndex == BAD_VAR_NUM) { // Access a promoted struct's field with an offset that doesn't correspond to any field. // It can happen if the struct was cast to another struct with different offsets. return; } const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex); var_types fieldType = fieldDsc->TypeGet(); assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type. if (tree->TypeGet() != fieldType) { if (tree->TypeGet() != TYP_STRUCT) { // This is going to be an incorrect instruction promotion. // For example when we try to read int as long. return; } if (field->gtFldHnd != fieldDsc->lvFieldHnd) { CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr; CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass); CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass); if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass) { // Access the promoted field with a different class handle, can't check that types match. return; } // Access the promoted field as a field of a non-promoted struct with the same class handle. } else { // As we already checked this above, we must have a tree with a TYP_STRUCT type // assert(tree->TypeGet() == TYP_STRUCT); // The field tree accesses it as a struct, but the promoted LCL_VAR field // says that it has another type. This happens when struct promotion unwraps // a single field struct to get to its ultimate type. // // Note that currently, we cannot have a promoted LCL_VAR field with a struct type. // // This mismatch in types can lead to problems for some parent node type like GT_RETURN. // So we check the parent node and only allow this optimization when we have // a GT_ADDR or a GT_ASG. // // Note that for a GT_ASG we have to do some additional work, // see below after the SetOper(GT_LCL_VAR) // if (!parent->OperIs(GT_ADDR, GT_ASG)) { // Don't transform other operations such as GT_RETURN // return; } #ifdef DEBUG // This is an additional DEBUG-only sanity check // assert(structPromotionHelper != nullptr); structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType); #endif // DEBUG } } tree->SetOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(fieldLclIndex); tree->gtType = fieldType; tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`. 
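                // For illustration (with hypothetical local numbers): an access such as
                //     GT_FIELD int s.x    (where 's' is promoted local V02 and 'x' lives at V03)
                // has just been retyped in place into
                //     GT_LCL_VAR int V03
                // so downstream phases see a plain local use instead of a struct field.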
if (parent->gtOper == GT_ASG) { // If we are changing the left side of an assignment, we need to set // these two flags: // if (parent->AsOp()->gtOp1 == tree) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } // Promotion of struct containing struct fields where the field // is a struct with a single pointer sized scalar type field: in // this case struct promotion uses the type of the underlying // scalar field as the type of struct field instead of recursively // promoting. This can lead to a case where we have a block-asgn // with its RHS replaced with a scalar type. Mark RHS value as // DONT_CSE so that assertion prop will not do const propagation. // The reason this is required is that if RHS of a block-asg is a // constant, then it is interpreted as init-block incorrectly. // // TODO - This can also be avoided if we implement recursive struct // promotion, tracked by #10019. if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree)) { tree->gtFlags |= GTF_DONT_CSE; } } #ifdef DEBUG if (verbose) { printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex); } #endif // DEBUG } } else { // Normed struct // A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if // the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8 // bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However, // there is one extremely rare case where that won't be true. An enum type is a special value type // that contains exactly one element of a primitive integer type (that, for CLS programs is named // "value__"). The VM tells us that a local var of that enum type is the primitive type of the // enum's single field. It turns out that it is legal for IL to access this field using ldflda or // ldfld. For example: // // .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum // { // .field public specialname rtspecialname int16 value__ // .field public static literal valuetype mynamespace.e_t one = int16(0x0000) // } // .method public hidebysig static void Main() cil managed // { // .locals init (valuetype mynamespace.e_t V_0) // ... // ldloca.s V_0 // ldflda int16 mynamespace.e_t::value__ // ... // } // // Normally, compilers will not generate the ldflda, since it is superfluous. // // In the example, the lclVar is short, but the JIT promotes all trees using this local to the // "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type // mismatch like this, don't do this morphing. The local var may end up getting marked as // address taken, and the appropriate SHORT load will be done from memory in that case. 
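                // (Summarizing the example above: the enum local is promoted to INT while the
                // GT_FIELD reads SHORT, so the types differ and the equality check below
                // intentionally skips the morph in that case.)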
if (tree->TypeGet() == obj->TypeGet()) { tree->ChangeOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(lclNum); tree->gtFlags &= GTF_NODE_MASK; if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } #ifdef DEBUG if (verbose) { printf("Replacing the field in normed struct with local var V%02u\n", lclNum); } #endif // DEBUG } } } } void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_LCL_FLD); unsigned lclNum = tree->AsLclFld()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(varDsc)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = tree->AsLclFld()->GetLclOffs(); unsigned fieldLclIndex = 0; LclVarDsc* fldVarDsc = nullptr; if (fldOffset != BAD_VAR_NUM) { fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); noway_assert(fieldLclIndex != BAD_VAR_NUM); fldVarDsc = lvaGetDesc(fieldLclIndex); } var_types treeType = tree->TypeGet(); var_types fieldType = fldVarDsc->TypeGet(); if (fldOffset != BAD_VAR_NUM && ((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1))) { // There is an existing sub-field we can use. tree->AsLclFld()->SetLclNum(fieldLclIndex); // The field must be an enregisterable type; otherwise it would not be a promoted field. // The tree type may not match, e.g. for return types that have been morphed, but both // must be enregisterable types. assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType)); tree->ChangeOper(GT_LCL_VAR); assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex); tree->gtType = fldVarDsc->TypeGet(); if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex); } else { // There is no existing field that has all the parts that we need // So we must ensure that the struct lives in memory. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); #ifdef DEBUG // We can't convert this guy to a float because he really does have his // address taken.. varDsc->lvKeepType = 1; #endif // DEBUG } } else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc))) { assert(tree->AsLclFld()->GetLclOffs() == 0); tree->gtType = varDsc->TypeGet(); tree->ChangeOper(GT_LCL_VAR); JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum); } } } //------------------------------------------------------------------------ // fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs void Compiler::fgResetImplicitByRefRefCount() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgResetImplicitByRefRefCount()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsImplicitByRef) { // Clear the ref count field; fgMarkAddressTakenLocals will increment it per // appearance of implicit-by-ref param so that call arg morphing can do an // optimization for single-use implicit-by-ref params whose single use is as // an outgoing call argument. 
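            // Note that these are the early (RCS_EARLY) counts only: fgRetypeImplicitByRefArgs
            // reads them back later (total appearances via lvRefCnt, call appearances via
            // lvRefCntWtd) when deciding whether to keep or undo the struct promotion.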
varDsc->setLvRefCnt(0, RCS_EARLY); varDsc->setLvRefCntWtd(0, RCS_EARLY); } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from // struct to pointer). Also choose (based on address-exposed analysis) // which struct promotions of implicit byrefs to keep or discard. // For those which are kept, insert the appropriate initialization code. // For those which are to be discarded, annotate the promoted field locals // so that fgMorphImplicitByRefArgs will know to rewrite their appearances // using indirections off the pointer parameters. void Compiler::fgRetypeImplicitByRefArgs() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgRetypeImplicitByRefArgs()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { unsigned size; if (varDsc->lvSize() > REGSIZE_BYTES) { size = varDsc->lvSize(); } else { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); size = info.compCompHnd->getClassSize(typeHnd); } if (varDsc->lvPromoted) { // This implicit-by-ref was promoted; create a new temp to represent the // promoted struct before rewriting this parameter as a pointer. unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref")); lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(newLclNum); } // Update varDsc since lvaGrabTemp might have re-allocated the var dsc array. varDsc = lvaGetDesc(lclNum); // Copy the struct promotion annotations to the new temp. LclVarDsc* newVarDsc = lvaGetDesc(newLclNum); newVarDsc->lvPromoted = true; newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart; newVarDsc->lvFieldCnt = varDsc->lvFieldCnt; newVarDsc->lvContainsHoles = varDsc->lvContainsHoles; newVarDsc->lvCustomLayout = varDsc->lvCustomLayout; #ifdef DEBUG newVarDsc->lvKeepType = true; #endif // DEBUG // Propagate address-taken-ness and do-not-enregister-ness. newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason())); newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister; newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr; newVarDsc->lvSingleDef = varDsc->lvSingleDef; newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate; newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef; #ifdef DEBUG newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason()); #endif // DEBUG // If the promotion is dependent, the promoted temp would just be committed // to memory anyway, so we'll rewrite its appearances to be indirections // through the pointer parameter, the same as we'd do for this // parameter if it weren't promoted at all (otherwise the initialization // of the new temp would just be a needless memcpy at method entry). // // Otherwise, see how many appearances there are. We keep two early ref counts: total // number of references to the struct or some field, and how many of these are // arguments to calls. We undo promotion unless we see enough non-call uses. 
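                // For example (purely illustrative numbers): a three-field struct arg with
                // totalAppearances = 5, of which callAppearances = 3, has nonCallAppearances = 2;
                // since 2 <= 3 (the field count), the promotion would be undone below.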
                //
                const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
                const unsigned callAppearances  = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
                assert(totalAppearances >= callAppearances);
                const unsigned nonCallAppearances = totalAppearances - callAppearances;

                bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ||
                                      (nonCallAppearances <= varDsc->lvFieldCnt));

#ifdef DEBUG
                // Above is a profitability heuristic; either value of
                // undoPromotion should lead to correct code. So,
                // under stress, make different decisions at times.
                if (compStressCompile(STRESS_BYREF_PROMOTION, 25))
                {
                    undoPromotion = !undoPromotion;
                    JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum,
                            undoPromotion ? "" : "NOT");
                }
#endif // DEBUG

                JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n",
                        undoPromotion ? "Undoing" : "Keeping", lclNum,
                        (lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "",
                        totalAppearances, nonCallAppearances, varDsc->lvFieldCnt);

                if (!undoPromotion)
                {
                    // Insert IR that initializes the temp from the parameter.
                    // LHS is a simple reference to the temp.
                    fgEnsureFirstBBisScratch();
                    GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType);
                    // RHS is an indirection (using GT_OBJ) off the parameter.
                    GenTree* addr   = gtNewLclvNode(lclNum, TYP_BYREF);
                    GenTree* rhs    = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size));
                    GenTree* assign = gtNewAssignNode(lhs, rhs);
                    fgNewStmtAtBeg(fgFirstBB, assign);
                }

                // Update the locals corresponding to the promoted fields.
                unsigned fieldLclStart = varDsc->lvFieldLclStart;
                unsigned fieldCount    = varDsc->lvFieldCnt;
                unsigned fieldLclStop  = fieldLclStart + fieldCount;

                for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
                {
                    LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);

                    if (undoPromotion)
                    {
                        // Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs
                        // will know to rewrite appearances of this local.
                        assert(fieldVarDsc->lvParentLcl == lclNum);
                    }
                    else
                    {
                        // Set the new parent.
                        fieldVarDsc->lvParentLcl = newLclNum;
                    }
                    fieldVarDsc->lvIsParam = false;
                    // The fields shouldn't inherit any register preferences from
                    // the parameter which is really a pointer to the struct.
                    fieldVarDsc->lvIsRegArg      = false;
                    fieldVarDsc->lvIsMultiRegArg = false;
                    fieldVarDsc->SetArgReg(REG_NA);
#if FEATURE_MULTIREG_ARGS
                    fieldVarDsc->SetOtherArgReg(REG_NA);
#endif
                }

                // Hijack lvFieldLclStart to record the new temp number.
                // It will get fixed up in fgMarkDemotedImplicitByRefArgs.
                varDsc->lvFieldLclStart = newLclNum;
                // Go ahead and clear lvFieldCnt -- either we're promoting
                // a replacement temp or we're not promoting this arg, and
                // in either case the parameter is now a pointer that doesn't
                // have these fields.
                varDsc->lvFieldCnt = 0;

                // Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs
                // whether references to the struct should be rewritten as
                // indirections off the pointer (not promoted) or references
                // to the new struct local (promoted).
                varDsc->lvPromoted = !undoPromotion;
            }
            else
            {
                // The "undo promotion" path above clears lvPromoted for args that struct
                // promotion wanted to promote but that aren't considered profitable to
                // rewrite. It hijacks lvFieldLclStart to communicate to
                // fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left
                // on such args for fgMorphImplicitByRefArgs to consult in the interim.
// Here we have an arg that was simply never promoted, so make sure it doesn't // have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs // and fgMarkDemotedImplicitByRefArgs. assert(varDsc->lvFieldLclStart == 0); } // Since the parameter in this position is really a pointer, its type is TYP_BYREF. varDsc->lvType = TYP_BYREF; // Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF // make sure that the following flag is not set as these will force SSA to // exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa) // varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it. // The struct parameter may have had its address taken, but the pointer parameter // cannot -- any uses of the struct parameter's address are uses of the pointer // parameter's value, and there's no way for the MSIL to reference the pointer // parameter's address. So clear the address-taken bit for the parameter. varDsc->CleanAddressExposed(); varDsc->lvDoNotEnregister = 0; #ifdef DEBUG // This should not be converted to a double in stress mode, // because it is really a pointer varDsc->lvKeepType = 1; if (verbose) { printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum); } #endif // DEBUG } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion // asked to promote. Appearances of these have now been rewritten // (by fgMorphImplicitByRefArgs) using indirections from the pointer // parameter or references to the promotion temp, as appropriate. void Compiler::fgMarkDemotedImplicitByRefArgs() { JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n"); #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { JITDUMP("Clearing annotation for V%02d\n", lclNum); if (varDsc->lvPromoted) { // The parameter is simply a pointer now, so clear lvPromoted. It was left set // by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that // appearances of this arg needed to be rewritten to a new promoted struct local. varDsc->lvPromoted = false; // Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs // to tell fgMorphImplicitByRefArgs which local is the new promoted struct one. varDsc->lvFieldLclStart = 0; } else if (varDsc->lvFieldLclStart != 0) { // We created new temps to represent a promoted struct corresponding to this // parameter, but decided not to go through with the promotion and have // rewritten all uses as indirections off the pointer parameter. // We stashed the pointer to the new struct temp in lvFieldLclStart; make // note of that and clear the annotation. unsigned structLclNum = varDsc->lvFieldLclStart; varDsc->lvFieldLclStart = 0; // The temp struct is now unused; set flags appropriately so that we // won't allocate space for it on the stack. 
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum); structVarDsc->CleanAddressExposed(); #ifdef DEBUG structVarDsc->lvUnusedStruct = true; structVarDsc->lvUndoneStructPromotion = true; #endif // DEBUG unsigned fieldLclStart = structVarDsc->lvFieldLclStart; unsigned fieldCount = structVarDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum); // Fix the pointer to the parent local. LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); assert(fieldVarDsc->lvParentLcl == lclNum); fieldVarDsc->lvParentLcl = structLclNum; // The field local is now unused; set flags appropriately so that // we won't allocate stack space for it. fieldVarDsc->CleanAddressExposed(); } } } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } /***************************************************************************** * * Morph irregular parameters * for x64 and ARM64 this means turning them into byrefs, adding extra indirs. */ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { #if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) return false; #else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 bool changed = false; // Implicit byref morphing needs to know if the reference to the parameter is a // child of GT_ADDR or not, so this method looks one level down and does the // rewrite whenever a child is a reference to an implicit byref parameter. if (tree->gtOper == GT_ADDR) { if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true); changed = (morphedTree != nullptr); assert(!changed || (morphedTree == tree)); } } else { for (GenTree** pTree : tree->UseEdges()) { GenTree** pTreeCopy = pTree; GenTree* childTree = *pTree; if (childTree->gtOper == GT_LCL_VAR) { GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false); if (newChildTree != nullptr) { changed = true; *pTreeCopy = newChildTree; } } } } return changed; #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr) { assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR))); assert(isAddr == (tree->gtOper == GT_ADDR)); GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree; unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum(); LclVarDsc* lclVarDsc = lvaGetDesc(lclNum); CORINFO_FIELD_HANDLE fieldHnd; unsigned fieldOffset = 0; var_types fieldRefType = TYP_UNKNOWN; if (lvaIsImplicitByRefLocal(lclNum)) { // The SIMD transformation to coalesce contiguous references to SIMD vector fields will // re-invoke the traversal to mark address-taken locals. // So, we may encounter a tree that has already been transformed to TYP_BYREF. // If we do, leave it as-is. if (!varTypeIsStruct(lclVarTree)) { assert(lclVarTree->TypeGet() == TYP_BYREF); return nullptr; } else if (lclVarDsc->lvPromoted) { // fgRetypeImplicitByRefArgs created a new promoted struct local to represent this // arg. Rewrite this to refer to the new local. 
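            // (Illustrative: if this arg is V01 and fgRetypeImplicitByRefArgs stashed the
            // promoted temp's number in V01's lvFieldLclStart, the LCL_VAR below is simply
            // renumbered to that temp; V01 itself now just carries the incoming pointer.)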
            assert(lclVarDsc->lvFieldLclStart != 0);
            lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart);
            return tree;
        }

        fieldHnd = nullptr;
    }
    else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl))
    {
        // This was a field reference to an implicit-by-reference struct parameter that was
        // dependently promoted; update it to a field reference off the pointer.
        // Grab the field handle from the struct field lclVar.
        fieldHnd    = lclVarDsc->lvFieldHnd;
        fieldOffset = lclVarDsc->lvFldOffset;
        assert(fieldHnd != nullptr);
        // Update lclNum/lclVarDsc to refer to the parameter
        lclNum       = lclVarDsc->lvParentLcl;
        lclVarDsc    = lvaGetDesc(lclNum);
        fieldRefType = lclVarTree->TypeGet();
    }
    else
    {
        // We only need to transform the 'marked' implicit by ref parameters
        return nullptr;
    }

    // This is no longer a def of the lclVar, even if it WAS a def of the struct.
    lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK);

    if (isAddr)
    {
        if (fieldHnd == nullptr)
        {
            // change &X into just plain X
            tree->ReplaceWith(lclVarTree, this);
            tree->gtType = TYP_BYREF;
        }
        else
        {
            // change &(X.f) [i.e. GT_ADDR of local for promoted arg field]
            // into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param]
            lclVarTree->AsLclVarCommon()->SetLclNum(lclNum);
            lclVarTree->gtType  = TYP_BYREF;
            tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset);
        }

#ifdef DEBUG
        if (verbose)
        {
            printf("Replacing address of implicit by ref struct parameter with byref:\n");
        }
#endif // DEBUG
    }
    else
    {
        // Change X into OBJ(X) or FIELD(X, f)
        var_types structType = tree->gtType;
        tree->gtType         = TYP_BYREF;

        if (fieldHnd)
        {
            tree->AsLclVarCommon()->SetLclNum(lclNum);
            tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset);
        }
        else
        {
            tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree);

            if (structType == TYP_STRUCT)
            {
                gtSetObjGcInfo(tree->AsObj());
            }
        }

        // TODO-CQ: If the VM ever stops violating the ABI and passing heap references
        // we could remove TGTANYWHERE
        tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE);

#ifdef DEBUG
        if (verbose)
        {
            printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n");
        }
#endif // DEBUG
    }

#ifdef DEBUG
    if (verbose)
    {
        gtDispTree(tree);
    }
#endif // DEBUG

    return tree;
}

//------------------------------------------------------------------------
// fgAddFieldSeqForZeroOffset:
//    Associate a fieldSeq (with a zero offset) with the GenTree node 'addr'
//
// Arguments:
//    addr - A GenTree node
//    fieldSeqZero - a fieldSeq (with a zero offset)
//
// Notes:
//    Some GenTree nodes have internal fields that record the field sequence.
//    If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD
//    we can append the field sequence using the gtFieldSeq
//    If we have a GT_ADD of a GT_CNS_INT we can use the
//    fieldSeq from child node.
//    Otherwise we record 'fieldSeqZero' in the GenTree node using
//    a Map: GetFieldSeqStore()
//    When doing so we take care to preserve any existing zero field sequence
//
void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero)
{
    // We expect 'addr' to be an address at this point.
    assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);

    // Tunnel through any commas.
    const bool commaOnly = true;
    addr                 = addr->gtEffectiveVal(commaOnly);

    // We still expect 'addr' to be an address at this point.
    assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);

    FieldSeqNode* fieldSeqUpdate   = fieldSeqZero;
    GenTree*      fieldSeqNode     = addr;
    bool          fieldSeqRecorded = false;

#ifdef DEBUG
    if (verbose)
    {
        printf("\nfgAddFieldSeqForZeroOffset for");
        gtDispAnyFieldSeq(fieldSeqZero);

        printf("\naddr (Before)\n");
        gtDispNode(addr, nullptr, nullptr, false);
        gtDispCommonEndLine(addr);
    }
#endif // DEBUG

    switch (addr->OperGet())
    {
        case GT_CNS_INT:
            fieldSeqUpdate               = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero);
            addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
            fieldSeqRecorded             = true;
            break;

        case GT_ADDR:
            if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD)
            {
                fieldSeqNode = addr->AsOp()->gtOp1;

                GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld();
                fieldSeqUpdate        = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero);
                lclFld->SetFieldSeq(fieldSeqUpdate);
                fieldSeqRecorded = true;
            }
            break;

        case GT_ADD:
            if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT)
            {
                fieldSeqNode = addr->AsOp()->gtOp1;

                fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero);
                addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
                fieldSeqRecorded                            = true;
            }
            else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT)
            {
                fieldSeqNode = addr->AsOp()->gtOp2;

                fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero);
                addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
                fieldSeqRecorded                            = true;
            }
            break;

        default:
            break;
    }

    if (fieldSeqRecorded == false)
    {
        // Record in the general zero-offset map.

        // The "addr" node might already be annotated with a zero-offset field sequence.
        FieldSeqNode* existingFieldSeq = nullptr;
        if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq))
        {
            // Append the zero field sequences
            fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero);
        }
        // Overwrite the field sequence annotation for op1
        GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite);
        fieldSeqRecorded = true;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf(" (After)\n");
        gtDispNode(fieldSeqNode, nullptr, nullptr, false);
        gtDispCommonEndLine(fieldSeqNode);
    }
#endif // DEBUG
}

#ifdef FEATURE_SIMD

//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
//    If the RHS of the input stmt is a read for simd vector X Field, then this function
//    will keep reading the next few stmts based on the vector size (2, 3, 4).
//    If the next stmts' LHS are located contiguously and their RHS are also located
//    contiguously, then we replace those statements with a copyblk.
//
// Argument:
//    block - BasicBlock*. block which stmt belongs to
//    stmt  - Statement*. the stmt node we want to check
//
// return value:
//    if this function successfully optimized the stmts, then return true. Otherwise
//    return false;

bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt)
{
    GenTree* tree = stmt->GetRootNode();
    assert(tree->OperGet() == GT_ASG);

    GenTree*    originalLHS     = tree->AsOp()->gtOp1;
    GenTree*    prevLHS         = tree->AsOp()->gtOp1;
    GenTree*    prevRHS         = tree->AsOp()->gtOp2;
    unsigned    index           = 0;
    CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
    unsigned    simdSize        = 0;
    GenTree*    simdStructNode  = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);

    if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT)
    {
        // if the RHS is not from a SIMD vector field X, then there is no need to check further.
        return false;
    }

    var_types  simdBaseType         = JitType2PreciseVarType(simdBaseJitType);
    var_types  simdType             = getSIMDTypeForSize(simdSize);
    int        assignmentsCount     = simdSize / genTypeSize(simdBaseType) - 1;
    int        remainingAssignments = assignmentsCount;
    Statement* curStmt              = stmt->GetNextStmt();
    Statement* lastStmt             = stmt;

    while (curStmt != nullptr && remainingAssignments > 0)
    {
        GenTree* exp = curStmt->GetRootNode();
        if (exp->OperGet() != GT_ASG)
        {
            break;
        }
        GenTree* curLHS = exp->gtGetOp1();
        GenTree* curRHS = exp->gtGetOp2();

        if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
        {
            break;
        }

        remainingAssignments--;
        prevLHS = curLHS;
        prevRHS = curRHS;

        lastStmt = curStmt;
        curStmt  = curStmt->GetNextStmt();
    }

    if (remainingAssignments > 0)
    {
        // if the number of remaining assignments is greater than zero, then this means
        // that the assignments are not assigning to contiguous memory
        // locations from the same vector.
        return false;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\nFound contiguous assignments from a SIMD vector to memory.\n");
        printf("From " FMT_BB ", stmt ", block->bbNum);
        printStmtID(stmt);
        printf(" to stmt");
        printStmtID(lastStmt);
        printf("\n");
    }
#endif

    for (int i = 0; i < assignmentsCount; i++)
    {
        fgRemoveStmt(block, stmt->GetNextStmt());
    }

    GenTree* dstNode;

    if (originalLHS->OperIs(GT_LCL_FLD))
    {
        dstNode         = originalLHS;
        dstNode->gtType = simdType;
        dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField());

        // This may have changed a partial local field into full local field
        if (dstNode->IsPartialLclFld(this))
        {
            dstNode->gtFlags |= GTF_VAR_USEASG;
        }
        else
        {
            dstNode->gtFlags &= ~GTF_VAR_USEASG;
        }
    }
    else
    {
        GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
        if (simdStructNode->OperIsLocal())
        {
            setLclRelatedToSIMDIntrinsic(simdStructNode);
        }
        GenTree* copyBlkAddr = copyBlkDst;
        if (copyBlkAddr->gtOper == GT_LEA)
        {
            copyBlkAddr = copyBlkAddr->AsAddrMode()->Base();
        }
        GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr();
        if (localDst != nullptr)
        {
            setLclRelatedToSIMDIntrinsic(localDst);
        }

        if (simdStructNode->TypeGet() == TYP_BYREF)
        {
            assert(simdStructNode->OperIsLocal());
            assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum()));
            simdStructNode = gtNewIndir(simdType, simdStructNode);
        }
        else
        {
            assert(varTypeIsSIMD(simdStructNode));
        }

        dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst);
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\n" FMT_BB " stmt ", block->bbNum);
        printStmtID(stmt);
        printf("(before)\n");
        gtDispStmt(stmt);
    }
#endif

    assert(!simdStructNode->CanCSE());
    simdStructNode->ClearDoNotCSE();

    tree = gtNewAssignNode(dstNode, simdStructNode);

    stmt->SetRootNode(tree);

    // Since we generated a new address node which didn't exist before,
    // we should expose this address manually here.
    // TODO-ADDR: Remove this when LocalAddressVisitor transforms all
    // local field access into LCL_FLDs, at that point we would be
    // combining 2 existing LCL_FLDs or 2 FIELDs that do not reference
    // a local and thus cannot result in a new address exposed local.
    fgMarkAddressExposedLocals(stmt);

#ifdef DEBUG
    if (verbose)
    {
        printf("\nReplaced " FMT_BB " stmt", block->bbNum);
        printStmtID(stmt);
        printf("(after)\n");
        gtDispStmt(stmt);
    }
#endif
    return true;
}
#endif // FEATURE_SIMD

//------------------------------------------------------------------------
// fgCheckStmtAfterTailCall: check that statements after the tail call stmt
// candidate are in one of expected forms, that are described below.
//
// Return Value:
//    'true' if stmts are in the expected form, else 'false'.
//
bool Compiler::fgCheckStmtAfterTailCall()
{
    // For void calls, we would have created a GT_CALL in the stmt list.
    // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)).
    // For calls returning structs, we would have a void call, followed by a void return.
    // For debuggable code, it would be an assignment of the call to a temp
    // We want to get rid of any of these extra trees, and just leave
    // the call.
    Statement* callStmt = fgMorphStmt;

    Statement* nextMorphStmt = callStmt->GetNextStmt();

    // Check that the rest of the stmts in the block are in one of the following patterns:
    //  1) ret(void)
    //  2) ret(cast*(callResultLclVar))
    //  3) lclVar = callResultLclVar, the actual ret(lclVar) in another block
    //  4) nop
    if (nextMorphStmt != nullptr)
    {
        GenTree* callExpr = callStmt->GetRootNode();
        if (callExpr->gtOper != GT_ASG)
        {
            // The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar),
            // where lclVar was return buffer in the call for structs or simd.
            Statement* retStmt = nextMorphStmt;
            GenTree*   retExpr = retStmt->GetRootNode();
            noway_assert(retExpr->gtOper == GT_RETURN);

            nextMorphStmt = retStmt->GetNextStmt();
        }
        else
        {
            noway_assert(callExpr->gtGetOp1()->OperIsLocal());
            unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum();

#if FEATURE_TAILCALL_OPT_SHARED_RETURN

            // We can have a chain of assignments from the call result to
            // various inline return spill temps. These are ok as long
            // as the last one ultimately provides the return value or is ignored.
            //
            // And if we're returning a small type we may see a cast
            // on the source side.
            while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP)))
            {
                if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP))
                {
                    nextMorphStmt = nextMorphStmt->GetNextStmt();
                    continue;
                }
                Statement* moveStmt = nextMorphStmt;
                GenTree*   moveExpr = nextMorphStmt->GetRootNode();
                GenTree*   moveDest = moveExpr->gtGetOp1();
                noway_assert(moveDest->OperIsLocal());

                // Tunnel through any casts on the source side.
                GenTree* moveSource = moveExpr->gtGetOp2();
                while (moveSource->OperIs(GT_CAST))
                {
                    noway_assert(!moveSource->gtOverflow());
                    moveSource = moveSource->gtGetOp1();
                }
                noway_assert(moveSource->OperIsLocal());

                // Verify we're just passing the value from one local to another
                // along the chain.
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum(); noway_assert(srcLclNum == callResultLclNumber); const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum(); callResultLclNumber = dstLclNum; nextMorphStmt = moveStmt->GetNextStmt(); } if (nextMorphStmt != nullptr) #endif { Statement* retStmt = nextMorphStmt; GenTree* retExpr = nextMorphStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); GenTree* treeWithLcl = retExpr->gtGetOp1(); while (treeWithLcl->gtOper == GT_CAST) { noway_assert(!treeWithLcl->gtOverflow()); treeWithLcl = treeWithLcl->gtGetOp1(); } noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum()); nextMorphStmt = retStmt->GetNextStmt(); } } } return nextMorphStmt == nullptr; } //------------------------------------------------------------------------ // fgCanTailCallViaJitHelper: check whether we can use the faster tailcall // JIT helper on x86. // // Return Value: // 'true' if we can; or 'false' if we should use the generic tailcall mechanism. // bool Compiler::fgCanTailCallViaJitHelper() { #if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN) // On anything except windows X86 we have no faster mechanism available. return false; #else // The JIT helper does not properly handle the case where localloc was used. if (compLocallocUsed) return false; return true; #endif } //------------------------------------------------------------------------ // fgMorphReduceAddOps: reduce successive variable adds into a single multiply, // e.g., i + i + i + i => i * 4. // // Arguments: // tree - tree for reduction // // Return Value: // reduced tree if pattern matches, original tree otherwise // GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree) { // ADD(_, V0) starts the pattern match. if (!tree->OperIs(GT_ADD) || tree->gtOverflow()) { return tree; } #ifndef TARGET_64BIT // Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing // ADD ops with a helper function call. Don't apply optimization in that case. if (tree->TypeGet() == TYP_LONG) { return tree; } #endif GenTree* lclVarTree = tree->AsOp()->gtOp2; GenTree* consTree = tree->AsOp()->gtOp1; GenTree* op1 = consTree; GenTree* op2 = lclVarTree; if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2)) { return tree; } int foldCount = 0; unsigned lclNum = op2->AsLclVarCommon()->GetLclNum(); // Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum). while (true) { // ADD(lclNum, lclNum), end of tree if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount += 2; break; } // ADD(ADD(X, Y), lclNum), keep descending else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount++; op2 = op1->AsOp()->gtOp2; op1 = op1->AsOp()->gtOp1; } // Any other case is a pattern we won't attempt to fold for now. else { return tree; } } // V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize // accordingly consTree->BashToConst(foldCount, tree->TypeGet()); GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree); DEBUG_DESTROY_NODE(tree); return morphed; }
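// A worked trace of the reduction above (illustrative): for the source tree
//     ADD(ADD(ADD(V0, V0), V0), V0)
// the loop walks two outer ADDs (foldCount += 1 each) and then matches the
// innermost ADD(V0, V0) (foldCount += 2), giving foldCount = 4, so the tree
// becomes MUL(V0, 4), which the postorder transform can optimize further.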
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Morph XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "allocacheck.h" // for alloca // Convert the given node into a call to the specified helper passing // the given argument list. // // Tries to fold constants and also adds an edge for overflow exception // returns the morphed tree GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper) { GenTree* result; /* If the operand is a constant, we'll try to fold it */ if (oper->OperIsConst()) { GenTree* oldTree = tree; tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...) if (tree != oldTree) { return fgMorphTree(tree); } else if (tree->OperIsConst()) { return fgMorphConst(tree); } // assert that oper is unchanged and that it is still a GT_CAST node noway_assert(tree->AsCast()->CastOp() == oper); noway_assert(tree->gtOper == GT_CAST); } result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper)); assert(result == tree); return result; } /***************************************************************************** * * Convert the given node into a call to the specified helper passing * the given argument list. */ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs) { // The helper call ought to be semantically equivalent to the original node, so preserve its VN. tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN); GenTreeCall* call = tree->AsCall(); call->gtCallType = CT_HELPER; call->gtReturnType = tree->TypeGet(); call->gtCallMethHnd = eeFindHelper(helper); call->gtCallThisArg = nullptr; call->gtCallArgs = args; call->gtCallLateArgs = nullptr; call->fgArgInfo = nullptr; call->gtRetClsHnd = nullptr; call->gtCallMoreFlags = GTF_CALL_M_EMPTY; call->gtInlineCandidateInfo = nullptr; call->gtControlExpr = nullptr; #ifdef UNIX_X86_ABI call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI #if DEBUG // Helper calls are never candidates. call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER; call->callSig = nullptr; #endif // DEBUG #ifdef FEATURE_READYTORUN call->gtEntryPoint.addr = nullptr; call->gtEntryPoint.accessType = IAT_VALUE; #endif #if FEATURE_MULTIREG_RET call->ResetReturnType(); call->ClearOtherRegs(); call->ClearOtherRegFlags(); #ifndef TARGET_64BIT if (varTypeIsLong(tree)) { call->InitializeLongReturnType(); } #endif // !TARGET_64BIT #endif // FEATURE_MULTIREG_RET if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree->gtFlags |= GTF_CALL; for (GenTreeCall::Use& use : GenTreeCall::UseList(args)) { tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } /* Perform the morphing */ if (morphArgs) { tree = fgMorphArgs(call); } return tree; } //------------------------------------------------------------------------ // fgMorphExpandCast: Performs the pre-order (required) morphing for a cast. // // Performs a rich variety of pre-order transformations (and some optimizations). // // Notably: // 1. 
Splits long -> small type casts into long -> int -> small type
//    for 32 bit targets. Does the same for float/double -> small type
//    casts for all targets.
// 2. Morphs casts not supported by the target directly into helpers.
//    These mostly have to do with casts from and to floating point
//    types, especially checked ones. Refer to the implementation for
//    what specific casts need to be handled - it is a complex matrix.
// 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via
//    assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC
//    temporary.
// 4. "Pushes down" truncating long -> int casts for some operations:
//    CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)).
//    The purpose of this is to allow "optNarrowTree" in the post-order
//    traversal to fold the tree into a TYP_INT one, which helps 32 bit
//    targets (and AMD64 too since 32 bit instructions are more compact).
//    TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64.
//
// Arguments:
//    tree - the cast tree to morph
//
// Return Value:
//    The fully morphed tree, or "nullptr" if it needs further morphing,
//    in which case the cast may be transformed into an unchecked one
//    and its operand changed (the cast "expanded" into two).
//
GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree)
{
    GenTree* oper = tree->CastOp();

    if (fgGlobalMorph && (oper->gtOper == GT_ADDR))
    {
        // Make sure we've checked if 'oper' is an address of an implicit-byref parameter.
        // If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast
        // morphing code to see that type.
        fgMorphImplicitByRefArgs(oper);
    }

    var_types srcType = genActualType(oper);
    var_types dstType = tree->CastToType();
    unsigned  dstSize = genTypeSize(dstType);

    // See if the cast has to be done in two steps. R -> I
    if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType))
    {
        if (srcType == TYP_FLOAT
#if defined(TARGET_ARM64)
            // Arm64: src = float, dst is overflow conversion.
            // This goes through helper and hence src needs to be converted to double.
            && tree->gtOverflow()
#elif defined(TARGET_AMD64)
            // Amd64: src = float, dst = uint64 or overflow conversion.
            // This goes through helper and hence src needs to be converted to double.
            && (tree->gtOverflow() || (dstType == TYP_ULONG))
#elif defined(TARGET_ARM)
            // Arm: src = float, dst = int64/uint64 or overflow conversion.
            && (tree->gtOverflow() || varTypeIsLong(dstType))
#else
            // x86: src = float, dst = uint32/int64/uint64 or overflow conversion.
            && (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT))
#endif
            )
        {
            oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE);
        }

        // Do we need to do it in two steps R -> I -> smallType?
        if (dstSize < genTypeSize(TYP_INT))
        {
            oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT);
            oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
            tree->AsCast()->CastOp() = oper;
            // We must not mistreat the original cast, which was from a floating point type,
            // as from an unsigned type, since we now have a TYP_INT node for the source and
            // CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT).
            assert(!tree->IsUnsigned());
        }
        else
        {
            if (!tree->gtOverflow())
            {
#ifdef TARGET_ARM64
                // ARM64 supports all non-overflow checking conversions directly.
return nullptr; #else switch (dstType) { case TYP_INT: return nullptr; case TYP_UINT: #if defined(TARGET_ARM) || defined(TARGET_AMD64) return nullptr; #else // TARGET_X86 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper); #endif // TARGET_X86 case TYP_LONG: #ifdef TARGET_AMD64 // SSE2 has instructions to convert a float/double directly to a long return nullptr; #else // !TARGET_AMD64 return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); #endif // !TARGET_AMD64 case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); default: unreached(); } #endif // TARGET_ARM64 } else { switch (dstType) { case TYP_INT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper); case TYP_UINT: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper); case TYP_LONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper); case TYP_ULONG: return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper); default: unreached(); } } } } #ifndef TARGET_64BIT // The code generation phase (for x86 & ARM32) does not handle casts // directly from [u]long to anything other than [u]int. Insert an // intermediate cast to native int. else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType)) { oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->AsCast()->CastOp() = oper; } #endif //! TARGET_64BIT #ifdef TARGET_ARMARCH // AArch, unlike x86/amd64, has instructions that can cast directly from // all integers (except for longs on AArch32 of course) to floats. // Because there is no IL instruction conv.r4.un, uint/ulong -> float // casts are always imported as CAST(float <- CAST(double <- uint/ulong)). // We can eliminate the redundant intermediate cast as an optimization. else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST) #ifdef TARGET_ARM && !varTypeIsLong(oper->AsCast()->CastOp()) #endif ) { oper->gtType = TYP_FLOAT; oper->CastToType() = TYP_FLOAT; return fgMorphTree(oper); } #endif // TARGET_ARMARCH #ifdef TARGET_ARM // converts long/ulong --> float/double casts into helper calls. else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType)) { if (dstType == TYP_FLOAT) { // there is only a double helper, so we // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } if (tree->gtFlags & GTF_UNSIGNED) return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } #endif // TARGET_ARM #ifdef TARGET_AMD64 // Do we have to do two step U4/8 -> R4/8 ? // Codegen supports the following conversion as one-step operation // a) Long -> R4/R8 // b) U8 -> R8 // // The following conversions are performed as two-step operations using above. // U4 -> R4/8 = U4-> Long -> R4/8 // U8 -> R4 = U8 -> R8 -> R4 else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { if (dstType == TYP_FLOAT) { // Codegen can handle U8 -> R8 conversion. 
// U8 -> R4 = U8 -> R8 -> R4 // - change the dsttype to double // - insert a cast from double to float // - recurse into the resulting tree tree->CastToType() = TYP_DOUBLE; tree->gtType = TYP_DOUBLE; tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT); return fgMorphTree(tree); } } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->ClearUnsigned(); tree->CastOp() = oper; } } #endif // TARGET_AMD64 #ifdef TARGET_X86 // Do we have to do two step U4/8 -> R4/8 ? else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) { srcType = varTypeToUnsigned(srcType); if (srcType == TYP_ULONG) { return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper); } else if (srcType == TYP_UINT) { oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); tree->gtFlags &= ~GTF_UNSIGNED; return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); } } else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType)) { oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper); // Since we don't have a Jit Helper that converts to a TYP_FLOAT // we just use the one that converts to a TYP_DOUBLE // and then add a cast to TYP_FLOAT // if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL)) { // Fix the return type to be TYP_DOUBLE // oper->gtType = TYP_DOUBLE; // Add a Cast to TYP_FLOAT // tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } else { return oper; } } #endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. we would like to just // change the type to int, however this gives the emitter fits because // it believes the variable is a GC variable at the beginning of the // instruction group, but is not turned non-gc by the code generator // we fix this by copying the GC pointer to a non-gc pointer temp. noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?"); // We generate an assignment to an int and then do the cast from an int. With this we avoid // the gc problem and we allow casts to bytes, longs, etc... unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC")); oper->gtType = TYP_I_IMPL; GenTree* asg = gtNewTempAssign(lclNum, oper); oper->gtType = srcType; // do the real cast GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType); // Generate the comma tree oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast); return fgMorphTree(oper); } // Look for narrowing casts ([u]long -> [u]int) and try to push them // down into the operand before morphing it. // // It doesn't matter if this is cast is from ulong or long (i.e. if // GTF_UNSIGNED is set) because the transformation is only applied to // overflow-insensitive narrowing casts, which always silently truncate. // // Note that casts from [u]long to small integer types are handled above. if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT))) { // As a special case, look for overflow-sensitive casts of an AND // expression, and see if the second operand is a small constant. Since // the result of an AND is bound by its smaller operand, it may be // possible to prove that the cast won't overflow, which will in turn // allow the cast's operand to be transformed. 
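        // For instance (illustrative): in CAST_OVF(uint <- AND(x, 0xFF)) the AND result
        // always fits in 32 bits, so the overflow check can never fire; dropping it below
        // unblocks the narrowing transformation that follows.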
if (tree->gtOverflow() && (oper->OperGet() == GT_AND)) { GenTree* andOp2 = oper->AsOp()->gtOp2; // Look for a constant less than 2^{32} for a cast to uint, or less // than 2^{31} for a cast to int. int maxWidth = (dstType == TYP_UINT) ? 32 : 31; if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0)) { tree->ClearOverflow(); tree->SetAllEffectsFlags(oper); } } // Only apply this transformation during global morph, // when neither the cast node nor the oper node may throw an exception // based on the upper 32 bits. // if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx()) { // For these operations the lower 32 bits of the result only depends // upon the lower 32 bits of the operands. // bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG); // For long LSH cast to int, there is a discontinuity in behavior // when the shift amount is 32 or larger. // // CAST(INT, LSH(1LL, 31)) == LSH(1, 31) // LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31) // // CAST(INT, LSH(1LL, 32)) == 0 // LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1 // // So some extra validation is needed. // if (oper->OperIs(GT_LSH)) { GenTree* shiftAmount = oper->AsOp()->gtOp2; // Expose constant value for shift, if possible, to maximize the number // of cases we can handle. shiftAmount = gtFoldExpr(shiftAmount); oper->AsOp()->gtOp2 = shiftAmount; #if DEBUG // We may remorph the shift amount tree again later, so clear any morphed flag. shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG if (shiftAmount->IsIntegralConst()) { const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue(); if ((shiftAmountValue >= 64) || (shiftAmountValue < 0)) { // Shift amount is large enough or negative so result is undefined. // Don't try to optimize. assert(!canPushCast); } else if (shiftAmountValue >= 32) { // We know that we have a narrowing cast ([u]long -> [u]int) // and that we are casting to a 32-bit value, which will result in zero. // // Check to see if we have any side-effects that we must keep // if ((tree->gtFlags & GTF_ALL_EFFECT) == 0) { // Result of the shift is zero. DEBUG_DESTROY_NODE(tree); GenTree* zero = gtNewZeroConNode(TYP_INT); return fgMorphTree(zero); } else // We do have a side-effect { // We could create a GT_COMMA node here to keep the side-effect and return a zero // Instead we just don't try to optimize this case. canPushCast = false; } } else { // Shift amount is positive and small enough that we can push the cast through. canPushCast = true; } } else { // Shift amount is unknown. We can't optimize this case. assert(!canPushCast); } } if (canPushCast) { DEBUG_DESTROY_NODE(tree); // Insert narrowing casts for op1 and op2. oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType); if (oper->AsOp()->gtOp2 != nullptr) { oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType); } // Clear the GT_MUL_64RSLT if it is set. if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT)) { oper->gtFlags &= ~GTF_MUL_64RSLT; } // The operation now produces a 32-bit result. oper->gtType = TYP_INT; // Remorph the new tree as the casts that we added may be folded away. 
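// e.g. CAST(int <- ADD(long a, long b)) becomes ADD(CAST(int <- a), CAST(int <- b)), now a 32-bit ADD.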
return fgMorphTree(oper); } } } return nullptr; } #ifdef DEBUG const char* getNonStandardArgKindName(NonStandardArgKind kind) { switch (kind) { case NonStandardArgKind::None: return "None"; case NonStandardArgKind::PInvokeFrame: return "PInvokeFrame"; case NonStandardArgKind::PInvokeTarget: return "PInvokeTarget"; case NonStandardArgKind::PInvokeCookie: return "PInvokeCookie"; case NonStandardArgKind::WrapperDelegateCell: return "WrapperDelegateCell"; case NonStandardArgKind::ShiftLow: return "ShiftLow"; case NonStandardArgKind::ShiftHigh: return "ShiftHigh"; case NonStandardArgKind::FixedRetBuffer: return "FixedRetBuffer"; case NonStandardArgKind::VirtualStubCell: return "VirtualStubCell"; case NonStandardArgKind::R2RIndirectionCell: return "R2RIndirectionCell"; case NonStandardArgKind::ValidateIndirectCallTarget: return "ValidateIndirectCallTarget"; default: unreached(); } } void fgArgTabEntry::Dump() const { printf("fgArgTabEntry[arg %u", argNum); printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet())); printf(" %s", varTypeName(argType)); printf(" (%s)", passedByRef ? "By ref" : "By value"); if (GetRegNum() != REG_STK) { printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s"); for (unsigned i = 0; i < numRegs; i++) { printf(" %s", getRegName(regNums[i])); } } if (GetStackByteSize() > 0) { #if defined(DEBUG_ARG_SLOTS) printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset); #else printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset); #endif } printf(", byteAlignment=%u", m_byteAlignment); if (isLateArg()) { printf(", lateArgInx=%u", GetLateArgInx()); } if (IsSplit()) { printf(", isSplit"); } if (needTmp) { printf(", tmpNum=V%02u", tmpNum); } if (needPlace) { printf(", needPlace"); } if (isTmp) { printf(", isTmp"); } if (processed) { printf(", processed"); } if (IsHfaRegArg()) { printf(", isHfa(%s)", varTypeName(GetHfaType())); } if (isBackFilled) { printf(", isBackFilled"); } if (nonStandardArgKind != NonStandardArgKind::None) { printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind)); } if (isStruct) { printf(", isStruct"); } printf("]\n"); } #endif fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs) { compiler = comp; callTree = call; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = 0; #if defined(UNIX_X86_ABI) alignmentDone = false; stkSizeBytes = 0; padStkAlign = 0; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = 0; #endif argTableSize = numArgs; // the allocated table size hasRegArgs = false; hasStackArgs = false; argsComplete = false; argsSorted = false; needsTemps = false; if (argTableSize == 0) { argTable = nullptr; } else { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; } } /***************************************************************************** * * fgArgInfo Copy Constructor * * This method needs to act like a copy constructor for fgArgInfo. * The newCall needs to have its fgArgInfo initialized such that * we have newCall that is an exact copy of the oldCall. * We have to take care since the argument information * in the argTable contains pointers that must point to the * new arguments and not the old arguments. 
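 *  Each copied entry's 'use' and 'lateUse' pointers are therefore remapped
 *  below by walking the old and new argument lists in parallel.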
*/ fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall) { fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo; compiler = oldArgInfo->compiler; callTree = newCall; argCount = 0; // filled in arg count, starts at zero DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; stkLevel = oldArgInfo->stkLevel; #if defined(UNIX_X86_ABI) alignmentDone = oldArgInfo->alignmentDone; stkSizeBytes = oldArgInfo->stkSizeBytes; padStkAlign = oldArgInfo->padStkAlign; #endif #if FEATURE_FIXED_OUT_ARGS outArgSize = oldArgInfo->outArgSize; #endif argTableSize = oldArgInfo->argTableSize; argsComplete = false; argTable = nullptr; assert(oldArgInfo->argsComplete); if (argTableSize > 0) { argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize]; // Copy the old arg entries for (unsigned i = 0; i < argTableSize; i++) { argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]); } // The copied arg entries contain pointers to old uses, they need // to be updated to point to new uses. if (newCall->gtCallThisArg != nullptr) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldCall->gtCallThisArg) { argTable[i]->use = newCall->gtCallThisArg; break; } } } GenTreeCall::UseIterator newUse = newCall->Args().begin(); GenTreeCall::UseIterator newUseEnd = newCall->Args().end(); GenTreeCall::UseIterator oldUse = oldCall->Args().begin(); GenTreeCall::UseIterator oldUseEnd = oldCall->Args().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->use == oldUse.GetUse()) { argTable[i]->use = newUse.GetUse(); break; } } } newUse = newCall->LateArgs().begin(); newUseEnd = newCall->LateArgs().end(); oldUse = oldCall->LateArgs().begin(); oldUseEnd = oldCall->LateArgs().end(); for (; newUse != newUseEnd; ++newUse, ++oldUse) { for (unsigned i = 0; i < argTableSize; i++) { if (argTable[i]->lateUse == oldUse.GetUse()) { argTable[i]->lateUse = newUse.GetUse(); break; } } } } argCount = oldArgInfo->argCount; DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;) nextStackByteOffset = oldArgInfo->nextStackByteOffset; hasRegArgs = oldArgInfo->hasRegArgs; hasStackArgs = oldArgInfo->hasStackArgs; argsComplete = true; argsSorted = true; } void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry) { assert(argCount < argTableSize); argTable[argCount] = curArgTabEntry; argCount++; } fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; // Any additional register numbers are set by the caller. // This is primarily because on ARM we don't yet know if it // will be split or if it is a double HFA, so the number of registers // may actually be less.
curArgTabEntry->setRegNum(0, regNum); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; curArgTabEntry->numRegs = numRegs; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->slotNum = 0; curArgTabEntry->numSlots = 0; #endif curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(0); hasRegArgs = true; if (argCount >= argTableSize) { fgArgTabEntry** oldTable = argTable; argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1]; memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*)); argTableSize++; } AddArg(curArgTabEntry); return curArgTabEntry; } #if defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, regNumber regNum, unsigned numRegs, unsigned byteSize, unsigned byteAlignment, const bool isStruct, const bool isFloatHfa, const bool isVararg, const regNumber otherRegNum, const unsigned structIntRegs, const unsigned structFloatRegs, const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr) { fgArgTabEntry* curArgTabEntry = AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg); assert(curArgTabEntry != nullptr); curArgTabEntry->isStruct = isStruct; // is this a struct arg curArgTabEntry->structIntRegs = structIntRegs; curArgTabEntry->structFloatRegs = structFloatRegs; INDEBUG(curArgTabEntry->checkIsStruct();) assert(numRegs <= 2); if (numRegs == 2) { curArgTabEntry->setRegNum(1, otherRegNum); } if (isStruct && structDescPtr != nullptr) { curArgTabEntry->structDesc.CopyFrom(*structDescPtr); } return curArgTabEntry; } #endif // defined(UNIX_AMD64_ABI) fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum, GenTree* node, GenTreeCall::Use* use, unsigned numSlots, unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa, bool isVararg /*=false*/) { fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE); } #endif nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment); DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum); curArgTabEntry->setRegNum(0, REG_STK); curArgTabEntry->argNum = argNum; curArgTabEntry->argType = node->TypeGet(); curArgTabEntry->use = use; curArgTabEntry->lateUse = nullptr; #if defined(DEBUG_ARG_SLOTS) curArgTabEntry->numSlots = numSlots; curArgTabEntry->slotNum = nextSlotNum; #endif curArgTabEntry->numRegs = 0; #if defined(UNIX_AMD64_ABI) curArgTabEntry->structIntRegs = 0; curArgTabEntry->structFloatRegs = 0; #endif // defined(UNIX_AMD64_ABI) curArgTabEntry->SetLateArgInx(UINT_MAX); curArgTabEntry->tmpNum = BAD_VAR_NUM; curArgTabEntry->SetSplit(false); curArgTabEntry->isTmp = false; curArgTabEntry->needTmp = false; curArgTabEntry->needPlace = 
false; curArgTabEntry->processed = false; if (GlobalJitOptions::compFeatureHfa) { curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE); } curArgTabEntry->isBackFilled = false; curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None; curArgTabEntry->isStruct = isStruct; curArgTabEntry->SetIsVararg(isVararg); curArgTabEntry->SetByteAlignment(byteAlignment); curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa); curArgTabEntry->SetByteOffset(nextStackByteOffset); hasStackArgs = true; AddArg(curArgTabEntry); DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) nextStackByteOffset += curArgTabEntry->GetByteSize(); return curArgTabEntry; } void fgArgInfo::RemorphReset() { DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;) nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // UpdateRegArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicating whether we are remorphing the call // // Assumptions: // This must have already been determined to be at least partially passed in registers. // void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa. assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); assert(curArgTabEntry->numRegs != 0); assert(curArgTabEntry->use->GetNode() == node); } //------------------------------------------------------------------------ // UpdateStkArg: Update the given fgArgTabEntry while morphing. // // Arguments: // curArgTabEntry - the fgArgTabEntry to update. // node - the tree node that defines the argument // reMorphing - a boolean value indicating whether we are remorphing the call // // Assumptions: // This must have already been determined to be passed on the stack. // void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing) { bool isLateArg = curArgTabEntry->isLateArg(); // If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
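// (A mismatch between GTF_LATE_ARG on the node and the entry's late-arg status would mean the arg table and the IR have diverged.)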
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) || (!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0))); noway_assert(curArgTabEntry->use != callTree->gtCallThisArg); assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit()); assert(curArgTabEntry->use->GetNode() == node); #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE); assert(curArgTabEntry->slotNum == nextSlotNum); nextSlotNum += curArgTabEntry->numSlots; } #endif nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment()); assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset); nextStackByteOffset += curArgTabEntry->GetStackByteSize(); } void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots) { fgArgTabEntry* curArgTabEntry = nullptr; assert(argNum < argCount); for (unsigned inx = 0; inx < argCount; inx++) { curArgTabEntry = argTable[inx]; if (curArgTabEntry->argNum == argNum) { break; } } assert(numRegs > 0); assert(numSlots > 0); if (argsComplete) { assert(curArgTabEntry->IsSplit() == true); assert(curArgTabEntry->numRegs == numRegs); DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);) assert(hasStackArgs == true); } else { curArgTabEntry->SetSplit(true); curArgTabEntry->numRegs = numRegs; DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;) curArgTabEntry->SetByteOffset(0); hasStackArgs = true; } DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;) // TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size. nextStackByteOffset += numSlots * TARGET_POINTER_SIZE; } //------------------------------------------------------------------------ // EvalToTmp: Replace the node in the given fgArgTabEntry with a temp // // Arguments: // curArgTabEntry - the fgArgTabEntry for the argument // tmpNum - the varNum for the temp // newNode - the assignment of the argument value to the temp // // Notes: // Although the name of this method is EvalToTmp, it doesn't actually create // the temp or the copy. // void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode) { assert(curArgTabEntry->use != callTree->gtCallThisArg); assert(curArgTabEntry->use->GetNode() == newNode); assert(curArgTabEntry->GetNode() == newNode); curArgTabEntry->tmpNum = tmpNum; curArgTabEntry->isTmp = true; } void fgArgInfo::ArgsComplete() { bool hasStructRegArg = false; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); if (curArgTabEntry->GetRegNum() == REG_STK) { assert(hasStackArgs == true); #if !FEATURE_FIXED_OUT_ARGS // On x86 we use push instructions to pass arguments: // The non-register arguments are evaluated and pushed in order // and they are never evaluated into temps // continue; #endif } #if FEATURE_ARG_SPLIT else if (curArgTabEntry->IsSplit()) { hasStructRegArg = true; assert(hasStackArgs == true); } #endif // FEATURE_ARG_SPLIT else // we have a register argument, next we look for a struct type. { if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct)) { hasStructRegArg = true; } } /* If the argument tree contains an assignment (GTF_ASG) then the argument and every earlier argument (except constants) must be evaluated into temps since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a" -> when we see the second arg "a=5" we know the first two arguments "a, a=5" have to be evaluated into temps. For the case of an assignment, we only know that there exists some assignment someplace in the tree. We don't know what is being assigned so we are very conservative here and assume that any local variable could have been assigned. */ if (argx->gtFlags & GTF_ASG) { // If this is not the only argument, or it's a copyblk, or it already evaluates the expression to // a tmp, then we need a temp in the late arg list. if ((argCount > 1) || argx->OperIsCopyBlkOp() #ifdef FEATURE_FIXED_OUT_ARGS || curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property // that we only have late non-register args when that feature is on. #endif // FEATURE_FIXED_OUT_ARGS ) { curArgTabEntry->needTmp = true; needsTemps = true; } // For all previous arguments, unless they are a simple constant // we require that they be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); if (!prevArgTabEntry->GetNode()->IsInvariant()) { prevArgTabEntry->needTmp = true; needsTemps = true; } } } bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0); #if FEATURE_FIXED_OUT_ARGS // Like calls, if this argument has a tree that will do an inline throw, // a call to a jit helper, then we need to treat it like a call (but only // if there are/were any stack args). // This means unnesting, sorting, etc. Technically this is overly // conservative, but I want to avoid as much special-case debug-only code // as possible, so leveraging the GTF_CALL flag is the easiest. // if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode && (compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT)) { for (unsigned otherInx = 0; otherInx < argCount; otherInx++) { if (otherInx == curInx) { continue; } if (argTable[otherInx]->GetRegNum() == REG_STK) { treatLikeCall = true; break; } } } #endif // FEATURE_FIXED_OUT_ARGS /* If it contains a call (GTF_CALL) then itself and everything before the call with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT has to be kept in the right order since we will move the call to the first position) For calls we don't have to be quite as conservative as we are with an assignment since the call won't be modifying any non-address taken LclVars.
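EXAMPLE: ArgTab is "IND(p), f(), y" -> moving the call f() to the late arg list would hoist it above the load of *p, so IND(p), which has effect flags set, is evaluated into a temp first.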
*/ if (treatLikeCall) { if (argCount > 1) // If this is not the only argument { curArgTabEntry->needTmp = true; needsTemps = true; } else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL)) { // Spill all arguments that are floating point calls curArgTabEntry->needTmp = true; needsTemps = true; } // All previous arguments may need to be evaluated into temps for (unsigned prevInx = 0; prevInx < curInx; prevInx++) { fgArgTabEntry* prevArgTabEntry = argTable[prevInx]; assert(prevArgTabEntry->argNum < curArgTabEntry->argNum); // For all previous arguments, if they have any GTF_ALL_EFFECT // we require that they be evaluated into a temp if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0) { prevArgTabEntry->needTmp = true; needsTemps = true; } #if FEATURE_FIXED_OUT_ARGS // Or, if they are stored into the FIXED_OUT_ARG area // we require that they be moved to the gtCallLateArgs // and replaced with a placeholder node else if (prevArgTabEntry->GetRegNum() == REG_STK) { prevArgTabEntry->needPlace = true; } #if FEATURE_ARG_SPLIT else if (prevArgTabEntry->IsSplit()) { prevArgTabEntry->needPlace = true; } #endif // FEATURE_ARG_SPLIT #endif } } #if FEATURE_MULTIREG_ARGS // For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST // with multiple indirections, so here we consider spilling it into a tmp LclVar. // CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM bool isMultiRegArg = (curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1); #else bool isMultiRegArg = (curArgTabEntry->numRegs > 1); #endif if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false)) { if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0)) { // Spill multireg struct arguments that have Assignments or Calls embedded in them curArgTabEntry->needTmp = true; needsTemps = true; } else { // We call gtPrepareCost to measure the cost of evaluating this tree compiler->gtPrepareCost(argx); if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX))) { // Spill multireg struct arguments that are expensive to evaluate twice curArgTabEntry->needTmp = true; needsTemps = true; } #if defined(FEATURE_SIMD) && defined(TARGET_ARM64) else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet())) { // SIMD types do not need the optimization below due to their sizes if (argx->OperIsSimdOrHWintrinsic() || (argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) && argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic())) { curArgTabEntry->needTmp = true; needsTemps = true; } } #endif #ifndef TARGET_ARM // TODO-Arm: This optimization is not implemented for ARM32 // so we skip this for ARM32 until it is ported to use RyuJIT backend // else if (argx->OperGet() == GT_OBJ) { GenTreeObj* argObj = argx->AsObj(); unsigned structSize = argObj->GetLayout()->GetSize(); switch (structSize) { case 3: case 5: case 6: case 7: // If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes // if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar? { // If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes // For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
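// (A stack-based local can instead be read with a single wider 4- or 8-byte load, since its frame slot is pointer-size aligned.)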
// curArgTabEntry->needTmp = true; needsTemps = true; } break; case 11: case 13: case 14: case 15: // Spill any GT_OBJ multireg structs that are difficult to extract // // When we have a GT_OBJ of a struct with the above sizes we would need // to use 3 or 4 load instructions to load the exact size of this struct. // Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence // will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp. // Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing // the argument. // curArgTabEntry->needTmp = true; needsTemps = true; break; default: break; } } #endif // !TARGET_ARM } } #endif // FEATURE_MULTIREG_ARGS } // We only care because we can't spill structs and qmarks involve a lot of spilling, but // if we don't have qmarks, then it doesn't matter. // So check for Qmark's globally once here, instead of inside the loop. // const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed); #if FEATURE_FIXED_OUT_ARGS // For Arm/x64 we only care because we can't reorder a register // argument that uses GT_LCLHEAP. This is an optimization to // save a check inside the below loop. // const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed); #else const bool hasStackArgsWeCareAbout = hasStackArgs; #endif // FEATURE_FIXED_OUT_ARGS // If we have any stack args we have to force the evaluation // of any arguments passed in registers that might throw an exception // // Technically we are only required to handle the following two cases: // a GT_IND with GTF_IND_RNGCHK (only on x86) or // a GT_LCLHEAP node that allocates stuff on the stack // if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout) { for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry != nullptr); GenTree* argx = curArgTabEntry->GetNode(); // Examine the register args that are currently not marked needTmp // if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK)) { if (hasStackArgsWeCareAbout) { #if !FEATURE_FIXED_OUT_ARGS // On x86 we previously recorded a stack depth of zero when // morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag // Thus we can not reorder the argument after any stack based argument // (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to // check for it explicitly.) // if (argx->gtFlags & GTF_EXCEPT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } #else // For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP // if (argx->gtFlags & GTF_EXCEPT) { assert(compiler->compLocallocUsed); // Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } #endif } if (hasStructRegArgWeCareAbout) { // Returns true if a GT_QMARK node is encountered in the argx tree // if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT) { curArgTabEntry->needTmp = true; needsTemps = true; continue; } } } } } // When CFG is enabled and this is a delegate call or vtable call we must // compute the call target before all late args. However this will // effectively null-check 'this', which should happen only after all // arguments are evaluated. Thus we must evaluate all args with side // effects to a temp.
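// (Spilling to temps moves those side effects into the early arg list, ahead of the call target computation.)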
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke())) { // Always evaluate 'this' to temp. argTable[0]->needTmp = true; needsTemps = true; for (unsigned curInx = 1; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; GenTree* arg = curArgTabEntry->GetNode(); if ((arg->gtFlags & GTF_ALL_EFFECT) != 0) { curArgTabEntry->needTmp = true; needsTemps = true; } } } argsComplete = true; } void fgArgInfo::SortArgs() { assert(argsComplete == true); #ifdef DEBUG if (compiler->verbose) { printf("\nSorting the arguments:\n"); } #endif
    /* Shuffle the arguments around before we build the gtCallLateArgs list.
       The idea is to move all "simple" arguments like constants and local vars
       to the end of the table, and move the complex arguments towards the beginning
       of the table. This will help prevent registers from being spilled by
       allowing us to evaluate the more complex arguments before the simpler arguments.
       The argTable ends up looking like:
           +------------------------------------+  <--- argTable[argCount - 1]
           |             constants              |
           +------------------------------------+
           |      local var / local field       |
           +------------------------------------+
           | remaining arguments sorted by cost |
           +------------------------------------+
           | temps (argTable[].needTmp = true)  |
           +------------------------------------+
           |    args with calls (GTF_CALL)      |
           +------------------------------------+  <--- argTable[0]
    */
/* Set the beginning and end for the new argument table */ unsigned curInx; int regCount = 0; unsigned begTab = 0; unsigned endTab = argCount - 1; unsigned argsRemaining = argCount; // First take care of arguments that are constants. // [We use a backward iterator pattern] // curInx = argCount; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { regCount++; } assert(curArgTabEntry->lateUse == nullptr); // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put constants at the end of the table // if (argx->gtOper == GT_CNS_INT) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > 0); if (argsRemaining > 0) { // Next take care of arguments that are calls. // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // put calls at the beginning of the table // if (argx->gtFlags & GTF_CALL) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of arguments that are temps. // These temps come before the arguments that are // ordinary local vars or local fields // since this will give them a better chance to become // enregistered into their actual argument register.
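// (Overall order after sorting: calls first, then temps, then the remaining args by cost, then locals, and constants last.)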
// [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { if (curArgTabEntry->needTmp) { curArgTabEntry->processed = true; // place curArgTabEntry at the begTab position by performing a swap // if (curInx != begTab) { argTable[curInx] = argTable[begTab]; argTable[begTab] = curArgTabEntry; } begTab++; argsRemaining--; } } } } if (argsRemaining > 0) { // Next take care of local var and local field arguments. // These are moved towards the end of the argument evaluation. // [We use a backward iterator pattern] // curInx = endTab + 1; do { curInx--; fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD)) { noway_assert(curInx <= endTab); curArgTabEntry->processed = true; // place curArgTabEntry at the endTab position by performing a swap // if (curInx != endTab) { argTable[curInx] = argTable[endTab]; argTable[endTab] = curArgTabEntry; } endTab--; argsRemaining--; } } } while (curInx > begTab); } // Finally, take care of all the remaining arguments. // Note that we fill in one arg at a time using a while loop. bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop while (argsRemaining > 0) { /* Find the most expensive arg remaining and evaluate it next */ fgArgTabEntry* expensiveArgTabEntry = nullptr; unsigned expensiveArg = UINT_MAX; unsigned expensiveArgCost = 0; // [We use a forward iterator pattern] // for (curInx = begTab; curInx <= endTab; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; // Skip any already processed args // if (!curArgTabEntry->processed) { GenTree* argx = curArgTabEntry->GetNode(); // We should have already handled these kinds of args assert(argx->gtOper != GT_LCL_VAR); assert(argx->gtOper != GT_LCL_FLD); assert(argx->gtOper != GT_CNS_INT); // This arg should either have no persistent side effects or be the last one in our table // assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1))); if (argsRemaining == 1) { // This is the last arg to place expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; assert(begTab == endTab); break; } else { if (!costsPrepared) { /* We call gtPrepareCost to measure the cost of evaluating this tree */ compiler->gtPrepareCost(argx); } if (argx->GetCostEx() > expensiveArgCost) { // Remember this arg as the most expensive one that we have yet seen expensiveArgCost = argx->GetCostEx(); expensiveArg = curInx; expensiveArgTabEntry = curArgTabEntry; } } } } noway_assert(expensiveArg != UINT_MAX); // put the most expensive arg towards the beginning of the table expensiveArgTabEntry->processed = true; // place expensiveArgTabEntry at the begTab position by performing a swap // if (expensiveArg != begTab) { argTable[expensiveArg] = argTable[begTab]; argTable[begTab] = expensiveArgTabEntry; } begTab++; argsRemaining--; costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop } // The table should now be completely filled and thus begTab should now be adjacent to endTab // and regArgsRemaining should be zero assert(begTab == (endTab + 1)); assert(argsRemaining == 0); argsSorted = true; } #ifdef DEBUG void fgArgInfo::Dump(Compiler* compiler) const { for (unsigned curInx = 0; curInx 
< ArgCount(); curInx++) { fgArgTabEntry* curArgEntry = ArgTable()[curInx]; curArgEntry->Dump(); } } #endif //------------------------------------------------------------------------------ // fgMakeTmpArgNode : This function creates a tmp var only if needed. // We need this to be done in order to enforce ordering // of the evaluation of arguments. // // Arguments: // curArgTabEntry // // Return Value: // the newly created temp var tree. GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry) { unsigned tmpVarNum = curArgTabEntry->tmpNum; LclVarDsc* varDsc = lvaGetDesc(tmpVarNum); assert(varDsc->lvIsTemp); var_types type = varDsc->TypeGet(); // Create a copy of the temp to go into the late argument list GenTree* arg = gtNewLclvNode(tmpVarNum, type); GenTree* addrNode = nullptr; if (varTypeIsStruct(type)) { #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) // Can this type be passed as a primitive type? // If so, the following call will return the corresponding primitive type. // Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type. bool passedAsPrimitive = false; if (curArgTabEntry->TryPassAsPrimitive()) { CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd(); var_types structBaseType = getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg()); if (structBaseType != TYP_UNKNOWN) { passedAsPrimitive = true; #if defined(UNIX_AMD64_ABI) // TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry, // and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take // a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again. // if (genIsValidFloatReg(curArgTabEntry->GetRegNum())) { if (structBaseType == TYP_INT) { structBaseType = TYP_FLOAT; } else { assert(structBaseType == TYP_LONG); structBaseType = TYP_DOUBLE; } } #endif type = structBaseType; } } // If it is passed in registers, don't get the address of the var. Make it a // field instead. It will be loaded in registers with putarg_reg tree in lower. if (passedAsPrimitive) { arg->ChangeOper(GT_LCL_FLD); arg->gtType = type; lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); } else { var_types addrType = TYP_BYREF; arg = gtNewOperNode(GT_ADDR, addrType, arg); lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS)); addrNode = arg; #if FEATURE_MULTIREG_ARGS #ifdef TARGET_ARM64 assert(varTypeIsStruct(type)); if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg())) { // We will create a GT_OBJ for the argument below. // This will be passed by value in two registers. assert(addrNode != nullptr); // Create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); } #else // Always create an Obj of the temp to use it as a call argument. arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg); #endif // !TARGET_ARM64 #endif // FEATURE_MULTIREG_ARGS } #else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) // other targets, we pass the struct by value assert(varTypeIsStruct(type)); addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg); // Get a new Obj node temp to use it as a call argument. // gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object. 
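// (Here the source is ADDR of a local, so the OBJ is a local stack object and no exception flag is expected.)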
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode); #endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM) } // (varTypeIsStruct(type)) if (addrNode != nullptr) { assert(addrNode->gtOper == GT_ADDR); // the child of a GT_ADDR is required to have this flag set addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE; } return arg; } //------------------------------------------------------------------------------ // EvalArgsToTemps : Create temp assignments and populate the LateArgs list. void fgArgInfo::EvalArgsToTemps() { assert(argsSorted); unsigned regArgInx = 0; // Now go through the argument table and perform the necessary evaluation into temps GenTreeCall::Use* tmpRegArgNext = nullptr; for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; assert(curArgTabEntry->lateUse == nullptr); GenTree* argx = curArgTabEntry->GetNode(); GenTree* setupArg = nullptr; GenTree* defArg; #if !FEATURE_FIXED_OUT_ARGS // Only ever set for FEATURE_FIXED_OUT_ARGS assert(curArgTabEntry->needPlace == false); // On x86 and other archs that use push instructions to pass arguments: // Only the register arguments need to be replaced with placeholder nodes. // Stacked arguments are evaluated and pushed (or stored into the stack) in order. // if (curArgTabEntry->GetRegNum() == REG_STK) continue; #endif if (curArgTabEntry->needTmp) { if (curArgTabEntry->isTmp) { // Create a copy of the temp to go into the late argument list defArg = compiler->fgMakeTmpArgNode(curArgTabEntry); // mark the original node as a late argument argx->gtFlags |= GTF_LATE_ARG; } else { // Create a temp assignment for the argument // Put the temp in the gtCallLateArgs list CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (compiler->verbose) { printf("Argument with 'side effect'...\n"); compiler->gtDispTree(argx); } #endif #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) noway_assert(argx->gtType != TYP_STRUCT); #endif unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect")); if (argx->gtOper == GT_MKREFANY) { // For GT_MKREFANY, typically the actual struct copying does // not have any side-effects and can be delayed. So instead // of using a temp for the whole struct, we can just use a temp // for the operand that has a side-effect GenTree* operand; if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp1; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp. argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0) { operand = argx->AsOp()->gtOp2; // In the early argument evaluation, place an assignment to the temp // from the source operand of the mkrefany setupArg = compiler->gtNewTempAssign(tmpVarNum, operand); // Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet()); } } if (setupArg != nullptr) { // Now keep the mkrefany for the late argument list defArg = argx; // Clear the side-effect flags because now both op1 and op2 have no side-effects defArg->gtFlags &= ~GTF_ALL_EFFECT; } else { setupArg = compiler->gtNewTempAssign(tmpVarNum, argx); LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum); var_types lclVarType = genActualType(argx->gtType); var_types scalarType = TYP_UNKNOWN; if (setupArg->OperIsCopyBlkOp()) { setupArg = compiler->fgMorphCopyBlock(setupArg); #if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) if (lclVarType == TYP_STRUCT) { // This scalar LclVar widening step is only performed for ARM architectures. // CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum); unsigned structSize = varDsc->lvExactSize; scalarType = compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg()); } #endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) } // scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 => // 8) if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType)) { // Create a GT_LCL_FLD using the wider type to go to the late argument list defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0); } else { // Create a copy of the temp to go to the late argument list defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType); } curArgTabEntry->isTmp = true; curArgTabEntry->tmpNum = tmpVarNum; #ifdef TARGET_ARM // Previously we might have thought the local was promoted, and thus the 'COPYBLK' // might have left holes in the used registers (see // fgAddSkippedRegsInPromotedStructArg). // Too bad we're not that smart for these intermediate temps... if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1)) { regNumber argReg = curArgTabEntry->GetRegNum(); regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum()); for (unsigned i = 1; i < curArgTabEntry->numRegs; i++) { argReg = genRegArgNext(argReg); allUsedRegs |= genRegMask(argReg); } } #endif // TARGET_ARM } /* mark the assignment as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { printf("\n Evaluate to a temp:\n"); compiler->gtDispTree(setupArg); } #endif } } else // curArgTabEntry->needTmp == false { // On x86 - // Only register args are replaced with placeholder nodes // and the stack based arguments are evaluated and pushed in order. // // On Arm/x64 - When needTmp is false and needPlace is false, // the non-register arguments are evaluated and stored in order. // When needPlace is true we have a nested call that comes after // this argument so we have to replace it in the gtCallArgs list // (the initial argument evaluation list) with a placeholder. // if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false)) { continue; } /* No temp needed - move the whole node to the gtCallLateArgs list */ /* The argument is deferred and put in the late argument list */ defArg = argx; // Create a placeholder node to put in its place in gtCallLateArgs. // For a struct type we also need to record the class handle of the arg. CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; #if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI) // All structs are either passed (and retyped) as integral types, OR they // are passed by reference. 
noway_assert(argx->gtType != TYP_STRUCT); #else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI) if (defArg->TypeGet() == TYP_STRUCT) { clsHnd = compiler->gtGetStructHandleIfPresent(defArg); noway_assert(clsHnd != NO_CLASS_HANDLE); } #endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd); /* mark the placeholder node as a late argument */ setupArg->gtFlags |= GTF_LATE_ARG; #ifdef DEBUG if (compiler->verbose) { if (curArgTabEntry->GetRegNum() == REG_STK) { printf("Deferred stack argument :\n"); } else { printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum())); } compiler->gtDispTree(argx); printf("Replaced with placeholder node:\n"); compiler->gtDispTree(setupArg); } #endif } if (setupArg != nullptr) { noway_assert(curArgTabEntry->use->GetNode() == argx); curArgTabEntry->use->SetNode(setupArg); } /* deferred arg goes into the late argument list */ if (tmpRegArgNext == nullptr) { tmpRegArgNext = compiler->gtNewCallArgs(defArg); callTree->AsCall()->gtCallLateArgs = tmpRegArgNext; } else { noway_assert(tmpRegArgNext->GetNode() != nullptr); tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg)); tmpRegArgNext = tmpRegArgNext->GetNext(); } curArgTabEntry->lateUse = tmpRegArgNext; curArgTabEntry->SetLateArgInx(regArgInx++); } #ifdef DEBUG if (compiler->verbose) { printf("\nShuffled argument table: "); for (unsigned curInx = 0; curInx < argCount; curInx++) { fgArgTabEntry* curArgTabEntry = argTable[curInx]; if (curArgTabEntry->GetRegNum() != REG_STK) { printf("%s ", getRegName(curArgTabEntry->GetRegNum())); } } printf("\n"); } #endif } //------------------------------------------------------------------------------ // fgMakeMultiUse : If the node is an unaliased local or constant clone it, // otherwise insert a comma form temp // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // // Notes: // Caller must ensure that if the node is an unaliased local, the second use this // creates will be evaluated before the local can be reassigned. // // Can be safely called in morph preorder, before GTF_GLOB_REF is reliable. // GenTree* Compiler::fgMakeMultiUse(GenTree** pOp) { GenTree* const tree = *pOp; if (tree->IsInvariant()) { return gtClone(tree); } else if (tree->IsLocal()) { // Can't rely on GTF_GLOB_REF here. // if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed()) { return gtClone(tree); } } return fgInsertCommaFormTemp(pOp); } //------------------------------------------------------------------------------ // fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree, // and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl) // // Arguments: // ppTree - a pointer to the child node we will be replacing with the comma expression that // evaluates ppTree to a temp and returns the result // // structType - value type handle if the temp created is of TYP_STRUCT. 
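//                  (must be non-null when *ppTree is a struct; the function asserts this)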
// // Return Value: // A fresh GT_LCL_VAR node referencing the temp which has not been used // GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/) { GenTree* subTree = *ppTree; unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable")); if (varTypeIsStruct(subTree)) { assert(structType != nullptr); lvaSetStruct(lclNum, structType, false); } // If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree. // The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for // setting type of lcl vars created. GenTree* asg = gtNewTempAssign(lclNum, subTree); GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load); *ppTree = comma; return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum); } //------------------------------------------------------------------------ // fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg // // Arguments: // callNode - the call for which we are generating the fgArgInfo // // Return Value: // None // // Notes: // This method is idempotent in that it checks whether the fgArgInfo has already been // constructed, and just returns. // This method only computes the arg table and arg entries for the call (the fgArgInfo), // and makes no modification of the args themselves. // // The IR for the call args can change for calls with non-standard arguments: some non-standard // arguments add new call argument IR nodes. // void Compiler::fgInitArgInfo(GenTreeCall* call) { GenTreeCall::Use* args; GenTree* argx; unsigned argIndex = 0; unsigned intArgRegNum = 0; unsigned fltArgRegNum = 0; DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;) bool callHasRetBuffArg = call->HasRetBufArg(); bool callIsVararg = call->IsVarargs(); #ifdef TARGET_ARM regMaskTP argSkippedRegMask = RBM_NONE; regMaskTP fltArgSkippedRegMask = RBM_NONE; #endif // TARGET_ARM #if defined(TARGET_X86) unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated #else const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number #endif if (call->fgArgInfo != nullptr) { // We've already initialized and set the fgArgInfo. return; } JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); // At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined. assert(call->gtCallLateArgs == nullptr); if (TargetOS::IsUnix && callIsVararg) { // Currently native varargs is not implemented on non windows targets. // // Note that some targets like Arm64 Unix should not need much work as // the ABI is the same. While other targets may only need small changes // such as amd64 Unix, which just expects RAX to pass numFPArguments. NYI("Morphing Vararg call not yet implemented on non Windows targets."); } // Data structure for keeping track of non-standard args. Non-standard args are those that are not passed // following the normal calling convention or in the normal argument registers. We either mark existing // arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the // non-standard arguments into the argument list, below. class NonStandardArgs { struct NonStandardArg { GenTree* node; // The tree node representing this non-standard argument. 
// Note that this must be updated if the tree node changes due to morphing! regNumber reg; // The register to be assigned to this non-standard argument. NonStandardArgKind kind; // The kind of the non-standard arg }; ArrayStack<NonStandardArg> args; public: NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments { } //----------------------------------------------------------------------------- // Add: add a non-standard argument to the table of non-standard arguments // // Arguments: // node - a GenTree node that has a non-standard argument. // reg - the register to assign to this node. // // Return Value: // None. // void Add(GenTree* node, regNumber reg, NonStandardArgKind kind) { NonStandardArg nsa = {node, reg, kind}; args.Push(nsa); } //----------------------------------------------------------------------------- // Find: Look for a GenTree* in the set of non-standard args. // // Arguments: // node - a GenTree node to look for // // Return Value: // The index of the non-standard argument (a non-negative, unique, stable number). // If the node is not a non-standard argument, return -1. // int Find(GenTree* node) { for (int i = 0; i < args.Height(); i++) { if (node == args.Top(i).node) { return i; } } return -1; } //----------------------------------------------------------------------------- // Find: Look for a GenTree node in the non-standard arguments set. If found, // set the register to use for the node. // // Arguments: // node - a GenTree node to look for // pReg - an OUT argument. *pReg is set to the non-standard register to use if // 'node' is found in the non-standard argument set. // pKind - an OUT argument. *pKind is set to the kind of the non-standard arg. // // Return Value: // 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set. // 'false' otherwise (in this case, *pReg and *pKind are unmodified). // bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind) { for (int i = 0; i < args.Height(); i++) { NonStandardArg& nsa = args.TopRef(i); if (node == nsa.node) { *pReg = nsa.reg; *pKind = nsa.kind; return true; } } return false; } //----------------------------------------------------------------------------- // Replace: Replace the non-standard argument node at a given index. This is done when // the original node was replaced via morphing, but we need to continue to assign a // particular non-standard arg to it. // // Arguments: // index - the index of the non-standard arg. It must exist. // node - the new GenTree node. // // Return Value: // None. // void Replace(int index, GenTree* node) { args.TopRef(index).node = node; } } nonStandardArgs(getAllocator(CMK_ArrayStack)); // Count of args. On first morph, this is counted before we've filled in the arg table. // On remorph, we grab it from the arg table. unsigned numArgs = 0; // First we need to count the args if (call->gtCallThisArg != nullptr) { numArgs++; } for (GenTreeCall::Use& use : call->Args()) { numArgs++; } // Insert or mark non-standard args. These are either outside the normal calling convention, or // argument registers that don't follow the normal progression of argument registers in the calling // convention (such as for the ARM64 fixed return buffer argument x8). // // *********** NOTE ************* // The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments // in the implementation of fast tail call.
// *********** END NOTE ********* CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_X86) || defined(TARGET_ARM) // The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helper has a custom calling convention. // Set the argument registers correctly here. if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame); } #endif // defined(TARGET_X86) || defined(TARGET_ARM) #if defined(TARGET_ARM) // A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper // delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing // R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs // to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4 // correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub) // to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details. else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV) { GenTree* arg = call->gtCallThisArg->GetNode(); if (arg->OperIsLocal()) { arg = gtClone(arg, true); } else { GenTree* tmp = fgInsertCommaFormTemp(&arg); call->gtCallThisArg->SetNode(arg); call->gtFlags |= GTF_ASG; arg = tmp; } noway_assert(arg != nullptr); GenTree* newArg = new (this, GT_ADDR) GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell); // Append newArg as the last arg GenTreeCall::Use** insertionPoint = &call->gtCallArgs; for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef())) { } *insertionPoint = gtNewCallArgs(newArg); numArgs++; nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell); } #endif // defined(TARGET_ARM) #if defined(TARGET_X86) // The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the // hi part to be in EDX. This sets the argument registers up correctly. else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) || call->IsHelperCall(this, CORINFO_HELP_LRSZ)) { GenTreeCall::Use* args = call->gtCallArgs; GenTree* arg1 = args->GetNode(); assert(arg1 != nullptr); nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow); args = args->GetNext(); GenTree* arg2 = args->GetNode(); assert(arg2 != nullptr); nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh); } #else // !TARGET_X86 // TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed. // If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling // convention for x86/SSE. // If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it. // // We don't use the fixed return buffer argument if we have the special unmanaged instance call convention. // That convention doesn't use the fixed return buffer register. // CLANG_FORMAT_COMMENT_ANCHOR; if (call->HasFixedRetBufArg()) { args = call->gtCallArgs; assert(args != nullptr); argx = call->gtCallArgs->GetNode(); // We don't increment numArgs here, since we already counted this argument above.
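// Marking it non-standard just pins it to the fixed return buffer register (x8 on arm64).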
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer); } // We are allowed to have a Fixed Return Buffer argument combined // with any of the remaining non-standard arguments // CLANG_FORMAT_COMMENT_ANCHOR; if (call->IsVirtualStub()) { if (!call->IsTailCallViaJitHelper()) { GenTree* stubAddrArg = fgGetStubAddrArg(call); // And push the stub address onto the list of arguments call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell); } else { // If it is a VSD call getting dispatched via tail call helper, // fgMorphTailCallViaJitHelper() would materialize stub addr as an additional // parameter added to the original arg list and hence no need to // add as a non-standard arg. } } else #endif // !TARGET_X86 if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr)) { assert(!call->IsUnmanaged()); GenTree* arg = call->gtCallCookie; noway_assert(arg != nullptr); call->gtCallCookie = nullptr; // All architectures pass the cookie in a register. call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie); numArgs++; // put destination into R10/EAX arg = gtClone(call->gtCallAddr, true); call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs); numArgs++; nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget); // finally change this call to a helper call call->gtCallType = CT_HELPER; call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI); } #if defined(FEATURE_READYTORUN) // For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg() // for indirection cell address, which ZapIndirectHelperThunk expects. // For x64/x86 we use return address to get the indirection cell by disassembling the call site. // That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch. // Note that we call this before we know if something will be a fast tailcall or not. // That's ok; after making something a tailcall, we will invalidate this information // and reconstruct it if necessary. The tailcalling decision does not change since // this is a non-standard arg in a register. bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke(); #if defined(TARGET_XARCH) needsIndirectionCell &= call->IsFastTailCall(); #endif if (needsIndirectionCell) { assert(call->gtEntryPoint.addr != nullptr); size_t addrValue = (size_t)call->gtEntryPoint.addr; GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR); #ifdef DEBUG indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM); #ifdef TARGET_ARM // Issue #xxxx : Don't attempt to CSE this constant on ARM32 // // This constant has specific register requirements, and LSRA doesn't currently correctly // handle them when the value is in a CSE'd local. indirectCellAddress->SetDoNotCSE(); #endif // TARGET_ARM // Push the stub address onto the list of arguments. 
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs); numArgs++; nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(), NonStandardArgKind::R2RIndirectionCell); } #endif if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL)) { assert(call->gtCallArgs != nullptr); GenTreeCall::Use* args = call->gtCallArgs; GenTree* tar = args->GetNode(); nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget); } // Allocate the fgArgInfo for the call node; // call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs); // Add the 'this' argument value, if present. if (call->gtCallThisArg != nullptr) { argx = call->gtCallThisArg->GetNode(); assert(argIndex == 0); assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT); assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL)); const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum); const unsigned numRegs = 1; const unsigned byteSize = TARGET_POINTER_SIZE; const unsigned byteAlignment = TARGET_POINTER_SIZE; const bool isStruct = false; const bool isFloatHfa = false; // This is a register argument - put it in the table. call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr)); intArgRegNum++; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument fltArgRegNum++; #endif // WINDOWS_AMD64_ABI argIndex++; DEBUG_ARG_SLOTS_ONLY(argSlots++;) } #ifdef TARGET_X86 // Compute the maximum number of arguments that can be passed in registers. // For X86 we handle the varargs and unmanaged calling conventions #ifndef UNIX_X86_ABI if (call->gtFlags & GTF_CALL_POP_ARGS) { noway_assert(intArgRegNum < MAX_REG_ARG); // No more register arguments for varargs (CALL_POP_ARGS) maxRegArgs = intArgRegNum; // Add in the ret buff arg if (callHasRetBuffArg) maxRegArgs++; } #endif // UNIX_X86_ABI if (call->IsUnmanaged()) { noway_assert(intArgRegNum == 0); if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL || call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF || call->gtCallArgs->GetNode()->gtOper == GT_NOP); // the arg was already morphed to a register (fgMorph called twice) maxRegArgs = 1; } else { maxRegArgs = 0; } #ifdef UNIX_X86_ABI // Add in the ret buff arg if (callHasRetBuffArg && call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments. maxRegArgs++; #endif } #endif // TARGET_X86 /* Morph the user arguments */ CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_ARM) // The ARM ABI has a concept of back-filling of floating-point argument registers, according // to the "Procedure Call Standard for the ARM Architecture" document, especially // section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can // appear in a lower-numbered register than floating point argument N. That is, argument // register allocation is not strictly increasing. To support this, we need to keep track of unused // floating-point argument registers that we can back-fill. 
We only support 4-byte float and
// 8-byte double types, and one to four element HFAs composed of these types. With this, we will
// only back-fill single registers, since there is no way with these types to create
// an alignment hole greater than one register. However, there can be up to 3 back-fill slots
// available (with 16 FP argument registers). Consider this code:
//
// struct HFA { float x, y, z; }; // a three element HFA
// void bar(float a1,   // passed in f0
//          double a2,  // passed in f2/f3; skip f1 for alignment
//          HFA a3,     // passed in f4/f5/f6
//          double a4,  // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot
//          HFA a5,     // passed in f10/f11/f12
//          double a6,  // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill
//                      // slots
//          float a7,   // passed in f1 (back-filled)
//          float a8,   // passed in f7 (back-filled)
//          float a9,   // passed in f13 (back-filled)
//          float a10)  // passed on the stack in [OutArg+0]
//
// Note that if we ever support FP types with larger alignment requirements, then there could
// be more than single register back-fills.
//
// Once we assign a floating-point argument to the stack, they all must be on the stack.
// See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling
// continues only so long as no VFP CPRC has been allocated to a slot on the stack."
// We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack
// and prevent any additional floating-point arguments from going in registers.

bool anyFloatStackArgs = false;

#endif // TARGET_ARM

#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // UNIX_AMD64_ABI

#if defined(DEBUG)
// Check that we have valid information about call's argument types.
// For example:
// load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte));
// load int; call(byte) -> CALL(PUTARG_TYPE int (IND int));
// etc.
if (call->callSig != nullptr)
{
    CORINFO_SIG_INFO* sig          = call->callSig;
    const unsigned    sigArgsCount = sig->numArgs;

    GenTreeCall::Use* nodeArgs = call->gtCallArgs;
    // It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie
    // etc.
    unsigned nodeArgsCount = 0;
    call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult {
        nodeArgsCount++;
        return GenTree::VisitResult::Continue;
    });

    if (call->gtCallThisArg != nullptr)
    {
        // Handle the most common argument not in the `sig->numArgs`,
        // so the following check works on more methods.
nodeArgsCount--; } assert(nodeArgsCount >= sigArgsCount); if ((nodeArgsCount == sigArgsCount) && ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1))) { CORINFO_ARG_LIST_HANDLE sigArg = sig->args; for (unsigned i = 0; i < sig->numArgs; ++i) { CORINFO_CLASS_HANDLE argClass; const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); const var_types sigType = JITtype2varType(corType); assert(nodeArgs != nullptr); const GenTree* nodeArg = nodeArgs->GetNode(); assert(nodeArg != nullptr); const var_types nodeType = nodeArg->TypeGet(); assert((nodeType == sigType) || varTypeIsStruct(sigType) || genTypeSize(nodeType) == genTypeSize(sigType)); sigArg = info.compCompHnd->getArgNext(sigArg); nodeArgs = nodeArgs->GetNext(); } assert(nodeArgs == nullptr); } } #endif // DEBUG for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { argx = args->GetNode()->gtSkipPutArgType(); // Change the node to TYP_I_IMPL so we don't report GC info // NOTE: We deferred this from the importer because of the inliner. if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // We should never have any ArgPlaceHolder nodes at this point. assert(!argx->IsArgPlaceHolderNode()); // Setup any HFA information about 'argx' bool isHfaArg = false; var_types hfaType = TYP_UNDEF; unsigned hfaSlots = 0; bool passUsingFloatRegs; unsigned argAlignBytes = TARGET_POINTER_SIZE; unsigned size = 0; unsigned byteSize = 0; if (GlobalJitOptions::compFeatureHfa) { hfaType = GetHfaType(argx); isHfaArg = varTypeIsValidHfaType(hfaType); #if defined(TARGET_ARM64) if (TargetOS::IsWindows) { // Make sure for vararg methods isHfaArg is not true. isHfaArg = callIsVararg ? false : isHfaArg; } #endif // defined(TARGET_ARM64) if (isHfaArg) { isHfaArg = true; hfaSlots = GetHfaCount(argx); // If we have a HFA struct it's possible we transition from a method that originally // only had integer types to now start having FP types. We have to communicate this // through this flag since LSRA later on will use this flag to determine whether // or not to track the FP register set. // compFloatingPointUsed = true; } } const bool isFloatHfa = (hfaType == TYP_FLOAT); #ifdef TARGET_ARM passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP; bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG); // We don't use the "size" return value from InferOpSizeAlign(). 
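    // A sketch of the effect of the alignment handling below: if an 8-byte-aligned integer arg
    // (e.g. a long) arrives when intArgRegNum == 1, r1 is recorded in argSkippedRegMask and skipped,
    // so the arg starts in the even/odd r2/r3 pair as the ARM ABI requires.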
codeGen->InferOpSizeAlign(argx, &argAlignBytes);

argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE);

if (argAlignBytes == 2 * TARGET_POINTER_SIZE)
{
    if (passUsingFloatRegs)
    {
        if (fltArgRegNum % 2 == 1)
        {
            fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
            fltArgRegNum++;
        }
    }
    else if (passUsingIntRegs)
    {
        if (intArgRegNum % 2 == 1)
        {
            argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
            intArgRegNum++;
        }
    }

#if defined(DEBUG)
    if (argSlots % 2 == 1)
    {
        argSlots++;
    }
#endif
}

#elif defined(TARGET_ARM64)

assert(!callIsVararg || !isHfaArg);
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx));

#elif defined(TARGET_AMD64)

passUsingFloatRegs = varTypeIsFloating(argx);

#elif defined(TARGET_X86)

passUsingFloatRegs = false;

#else
#error Unsupported or unset target architecture
#endif // TARGET*

bool      isBackFilled     = false;
unsigned  nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
var_types structBaseType   = TYP_STRUCT;
unsigned  structSize       = 0;
bool      passStructByRef  = false;

bool     isStructArg;
GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */);

//
// Figure out the size of the argument. This is either in number of registers, or number of
// TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and
// the stack.
//
isStructArg                   = varTypeIsStruct(argx);
CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE;
if (isStructArg)
{
    objClass = gtGetStructHandle(argx);
    if (argx->TypeGet() == TYP_STRUCT)
    {
        // For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY
        switch (actualArg->OperGet())
        {
            case GT_OBJ:
                structSize = actualArg->AsObj()->GetLayout()->GetSize();
                assert(structSize == info.compCompHnd->getClassSize(objClass));
                break;
            case GT_LCL_VAR:
                structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize;
                break;
            case GT_MKREFANY:
                structSize = info.compCompHnd->getClassSize(objClass);
                break;
            default:
                BADCODE("illegal argument tree in fgInitArgInfo");
                break;
        }
    }
    else
    {
        structSize = genTypeSize(argx);
        assert(structSize == info.compCompHnd->getClassSize(objClass));
    }
}
#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
if (!isStructArg)
{
    size     = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
    byteSize = genTypeSize(argx);
}
else
{
    size     = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
    byteSize = structSize;
    eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
}
#else // !UNIX_AMD64_ABI
size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot'
if (!isStructArg)
{
    byteSize = genTypeSize(argx);
}

#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64)
if (isStructArg)
{
    if (isHfaArg)
    {
        // HFA structs are passed by value in multiple registers.
        // The "size" in registers may differ from the size in pointer-sized units.
        CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx);
        size                           = GetHfaCount(structHnd);
        byteSize                       = info.compCompHnd->getClassSize(structHnd);
    }
    else
    {
        // Structs are either passed in 1 or 2 (64-bit) slots.
        // Structs that are the size of 2 pointers are passed by value in multiple registers,
        // if sufficient registers are available.
        // Structs that are larger than 2 pointers (except for HFAs) are passed by
        // reference (to a copy)
        size     = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
        byteSize = structSize;

        if (size > 2)
        {
            size = 1;
        }
    }
    // Note that there are some additional rules for multireg structs.
    // (i.e., they cannot be split between registers and the stack)
}
else
{
    size     = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot'
    byteSize = genTypeSize(argx);
}
#elif defined(TARGET_ARM) || defined(TARGET_X86)
if (isStructArg)
{
    size     = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
    byteSize = structSize;
}
else
{
    // The typical case.
    // Long/double type argument(s) will be modified as needed in Lowering.
    size     = genTypeStSz(argx->gtType);
    byteSize = genTypeSize(argx);
}
#else
#error Unsupported or unset target architecture
#endif // TARGET_XXX

if (isStructArg)
{
    assert(argx == args->GetNode());
    assert(structSize != 0);

    structPassingKind howToPassStruct;
    structBaseType  = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize);
    passStructByRef = (howToPassStruct == SPK_ByReference);
    if (howToPassStruct == SPK_ByReference)
    {
        byteSize = TARGET_POINTER_SIZE;
    }
    else
    {
        byteSize = structSize;
    }

    if (howToPassStruct == SPK_PrimitiveType)
    {
#ifdef TARGET_ARM
        // TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct,
        // or for a struct of two floats. This causes the struct to be address-taken.
        if (structBaseType == TYP_DOUBLE)
        {
            size = 2;
        }
        else
#endif // TARGET_ARM
        {
            size = 1;
        }
    }
    else if (passStructByRef)
    {
        size = 1;
    }
}

const var_types argType = args->GetNode()->TypeGet();
if (args->GetNode()->OperIs(GT_PUTARG_TYPE))
{
    byteSize = genTypeSize(argType);
}

// The 'size' value must now have been set. (The original value of zero is an invalid value.)
assert(size != 0);
assert(byteSize != 0);

if (compMacOsArm64Abi())
{
    // Arm64 Apple has a special ABI for passing small size arguments on stack,
    // bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc.
    // It means passing 8 1-byte arguments on stack can take as small as 8 bytes.
    argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa);
}

//
// Figure out if the argument will be passed in a register.
//
bool               isRegArg           = false;
NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None;
regNumber          nonStdRegNum       = REG_NA;

if (isRegParamType(genActualType(argx->TypeGet()))
#ifdef UNIX_AMD64_ABI
    && (!isStructArg || structDesc.passedInRegisters)
#elif defined(TARGET_X86)
    || (isStructArg && isTrivialPointerSizedStruct(objClass))
#endif
        )
{
#ifdef TARGET_ARM
    if (passUsingFloatRegs)
    {
        // First, see if it can be back-filled
        if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
            (fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
            (size == 1))                          // The size to back-fill is one float register
        {
            // Back-fill the register.
            isBackFilled              = true;
            regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
            fltArgSkippedRegMask &= ~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
            nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
            assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG);
        }

        // Does the entire float, double, or HFA fit in the FP arg registers?
        // Check if the last register needed is still in the argument register range.
        isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG;

        if (!isRegArg)
        {
            anyFloatStackArgs = true;
        }
    }
    else
    {
        isRegArg = intArgRegNum < MAX_REG_ARG;
    }
#elif defined(TARGET_ARM64)
    if (passUsingFloatRegs)
    {
        // Check if the last register needed is still in the fp argument register range.
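        // For example (a sketch): with MAX_FLOAT_REG_ARG == 8 on arm64, an HFA of four floats
        // arriving at nextFltArgRegNum == 6 computes 6 + (4 - 1) == 9, which fails the check,
        // so the whole HFA goes to the stack and fltArgRegNum is pinned to MAX_FLOAT_REG_ARG below.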
        isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG;

        // Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers?
        if (isHfaArg && !isRegArg)
        {
            // recompute the 'size' so that it represent the number of stack slots rather than the number of
            // registers
            //
            unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
            size                 = roundupSize / TARGET_POINTER_SIZE;

            // We also must update fltArgRegNum so that we no longer try to
            // allocate any new floating point registers for args
            // This prevents us from backfilling a subsequent arg into d7
            //
            fltArgRegNum = MAX_FLOAT_REG_ARG;
        }
    }
    else
    {
        // Check if the last register needed is still in the int argument register range.
        isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;

        // Did we run out of registers when we had a 16-byte struct (size == 2)?
        // (i.e., we only have one register remaining but we needed two registers to pass this arg)
        // This prevents us from backfilling a subsequent arg into x7
        //
        if (!isRegArg && (size > 1))
        {
            // Arm64 windows native varargs allows splitting a 16 byte struct between stack
            // and the last general purpose register.
            if (TargetOS::IsWindows && callIsVararg)
            {
                // Override the decision and force a split.
                isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs;
            }
            else
            {
                // We also must update intArgRegNum so that we no longer try to
                // allocate any new general purpose registers for args
                //
                intArgRegNum = maxRegArgs;
            }
        }
    }
#else // not TARGET_ARM or TARGET_ARM64

#if defined(UNIX_AMD64_ABI)

    // Here a struct can be passed in register following the classifications of its members and size.
    // Now make sure there are actually enough registers to do so.
    if (isStructArg)
    {
        unsigned int structFloatRegs = 0;
        unsigned int structIntRegs   = 0;
        for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
        {
            if (structDesc.IsIntegralSlot(i))
            {
                structIntRegs++;
            }
            else if (structDesc.IsSseSlot(i))
            {
                structFloatRegs++;
            }
        }

        isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) &&
                   ((intArgRegNum + structIntRegs) <= MAX_REG_ARG);
    }
    else
    {
        if (passUsingFloatRegs)
        {
            isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG;
        }
        else
        {
            isRegArg = intArgRegNum < MAX_REG_ARG;
        }
    }
#else  // !defined(UNIX_AMD64_ABI)
    isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
#endif // !defined(UNIX_AMD64_ABI)
#endif // TARGET_ARM
}
else
{
    isRegArg = false;
}

// If there are nonstandard args (outside the calling convention) they were inserted above
// and noted in a table so we can recognize them here and build their argInfo.
//
// They should not affect the placement of any other args or stack space required.
// Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls.
bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind);
if (isNonStandard)
{
    isRegArg = (nonStdRegNum != REG_STK);
}
else if (call->IsTailCallViaJitHelper())
{
    // We have already (before calling fgMorphArgs()) appended the 4 special args
    // required by the x86 tailcall helper. These args are required to go on the
    // stack. Force them to the stack here.
    assert(numArgs >= 4);
    if (argIndex >= numArgs - 4)
    {
        isRegArg = false;
    }
}

// Now we know if the argument goes in registers or not and how big it is.
CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef TARGET_ARM
// If we ever allocate a floating point argument to the stack, then all
// subsequent HFA/float/double arguments go on the stack.
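// (This enforces the AAPCS rule quoted earlier: back-filling continues only so long as no VFP
// argument has been allocated to a slot on the stack.)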
if (!isRegArg && passUsingFloatRegs) { for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum) { fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT); } } // If we think we're going to split a struct between integer registers and the stack, check to // see if we've already assigned a floating-point arg to the stack. if (isRegArg && // We decided above to use a register for the argument !passUsingFloatRegs && // We're using integer registers (intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack anyFloatStackArgs) // We've already used the stack for a floating-point argument { isRegArg = false; // Change our mind; don't pass this struct partially in registers // Skip the rest of the integer argument registers for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum) { argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL); } } #endif // TARGET_ARM // Now create the fgArgTabEntry. fgArgTabEntry* newArgEntry; if (isRegArg) { regNumber nextRegNum = REG_STK; #if defined(UNIX_AMD64_ABI) regNumber nextOtherRegNum = REG_STK; unsigned int structFloatRegs = 0; unsigned int structIntRegs = 0; #endif // defined(UNIX_AMD64_ABI) if (isNonStandard) { nextRegNum = nonStdRegNum; } #if defined(UNIX_AMD64_ABI) else if (isStructArg && structDesc.passedInRegisters) { // It is a struct passed in registers. Assign the next available register. assert((structDesc.eightByteCount <= 2) && "Too many eightbytes."); regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum}; for (unsigned int i = 0; i < structDesc.eightByteCount; i++) { if (structDesc.IsIntegralSlot(i)) { *nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs); ++structIntRegs; } else if (structDesc.IsSseSlot(i)) { *nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs); ++structFloatRegs; } } } #endif // defined(UNIX_AMD64_ABI) else { // fill in or update the argInfo table nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum) : genMapIntRegArgNumToRegNum(intArgRegNum); } #ifdef TARGET_AMD64 #ifndef UNIX_AMD64_ABI assert(size == 1); #endif #endif // This is a register argument - put it in the table newArgEntry = call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum) UNIX_AMD64_ABI_ONLY_ARG(structIntRegs) UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs) UNIX_AMD64_ABI_ONLY_ARG(&structDesc)); newArgEntry->SetIsBackFilled(isBackFilled); // Set up the next intArgRegNum and fltArgRegNum values. 
if (!isBackFilled) { #if defined(UNIX_AMD64_ABI) if (isStructArg) { // For this case, we've already set the regNums in the argTabEntry intArgRegNum += structIntRegs; fltArgRegNum += structFloatRegs; } else #endif // defined(UNIX_AMD64_ABI) { if (!isNonStandard) { #if FEATURE_ARG_SPLIT // Check for a split (partially enregistered) struct if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG)) { // This indicates a partial enregistration of a struct type assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() || (argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG))); unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum; assert((unsigned char)numRegsPartial == numRegsPartial); call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial); } #endif // FEATURE_ARG_SPLIT if (passUsingFloatRegs) { fltArgRegNum += size; #ifdef WINDOWS_AMD64_ABI // Whenever we pass an integer register argument // we skip the corresponding floating point register argument intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG); #endif // WINDOWS_AMD64_ABI // No supported architecture supports partial structs using float registers. assert(fltArgRegNum <= MAX_FLOAT_REG_ARG); } else { // Increment intArgRegNum by 'size' registers intArgRegNum += size; #ifdef WINDOWS_AMD64_ABI fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG); #endif // WINDOWS_AMD64_ABI } } } } } else // We have an argument that is not passed in a register { // This is a stack argument - put it in the table newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg, isFloatHfa, callIsVararg); #ifdef UNIX_AMD64_ABI // TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs). if (structDesc.passedInRegisters) { newArgEntry->structDesc.CopyFrom(structDesc); } #endif } newArgEntry->nonStandardArgKind = nonStandardArgKind; if (GlobalJitOptions::compFeatureHfa) { if (isHfaArg) { newArgEntry->SetHfaType(hfaType, hfaSlots); } } newArgEntry->SetMultiRegNums(); noway_assert(newArgEntry != nullptr); if (newArgEntry->isStruct) { newArgEntry->passedByRef = passStructByRef; newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType; } else { newArgEntry->argType = argx->TypeGet(); } DEBUG_ARG_SLOTS_ONLY(argSlots += size;) } // end foreach argument loop #ifdef DEBUG if (verbose) { JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); call->fgArgInfo->Dump(this); JITDUMP("\n"); } #endif } //------------------------------------------------------------------------ // fgMorphArgs: Walk and transform (morph) the arguments of a call // // Arguments: // callNode - the call for which we are doing the argument morphing // // Return Value: // Like most morph methods, this method returns the morphed node, // though in this case there are currently no scenarios where the // node itself is re-created. // // Notes: // This calls fgInitArgInfo to create the 'fgArgInfo' for the call. // If it has already been created, that method will simply return. // // This method changes the state of the call node. It uses the existence // of gtCallLateArgs (the late arguments list) to determine if it has // already done the first round of morphing. // // The first time it is called (i.e. during global morphing), this method // computes the "late arguments". 
This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, are doing the in-order
// evaluation of the arguments that might have side-effects, such as embedded
// assignments, calls or possible throws. In these cases, it and earlier
// arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
    GenTreeCall::Use* args;
    GenTree*          argx;

    GenTreeFlags flagsSummary = GTF_EMPTY;

    unsigned argIndex = 0;
    DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)

    bool reMorphing = call->AreArgsComplete();

    // Set up the fgArgInfo.
    fgInitArgInfo(call);
    JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));

    // If we are remorphing, process the late arguments (which were determined by a previous caller).
    if (reMorphing)
    {
        for (GenTreeCall::Use& use : call->LateArgs())
        {
            use.SetNode(fgMorphTree(use.GetNode()));
            flagsSummary |= use.GetNode()->gtFlags;
        }

        assert(call->fgArgInfo != nullptr);
    }
    call->fgArgInfo->RemorphReset();

    // First we morph the argument subtrees ('this' pointer, arguments, etc.).
    // During the first call to fgMorphArgs we also record the
    // information about late arguments we have in 'fgArgInfo'.
    // This information is used later to construct the gtCallLateArgs.

    // Process the 'this' argument value, if present.
    if (call->gtCallThisArg != nullptr)
    {
        argx = call->gtCallThisArg->GetNode();
        fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
        argx                        = fgMorphTree(argx);
        call->gtCallThisArg->SetNode(argx);
        // This is a register argument - possibly update it in the table.
        call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
        flagsSummary |= argx->gtFlags;

        if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
        {
            if (!argx->OperIsLocal())
            {
                thisArgEntry->needTmp = true;
                call->fgArgInfo->SetNeedsTemps();
            }
        }
        assert(argIndex == 0);
        argIndex++;
        DEBUG_ARG_SLOTS_ONLY(argSlots++;)
    }

    // Note that this name is a bit of a misnomer - it indicates that there are struct args
    // that occupy more than a single slot that are passed by value (not necessarily in regs).
    bool hasMultiregStructArgs = false;
    for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
    {
        GenTree**      parentArgx = &args->NodeRef();
        fgArgTabEntry* argEntry   = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);

        // Morph the arg node, and update the parent and argEntry pointers.
argx = *parentArgx; argx = fgMorphTree(argx); *parentArgx = argx; assert(argx == args->GetNode()); DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();) CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE) { if (argSlots % 2 == 1) { argSlots++; } } } #endif // DEBUG if (argEntry->isNonStandard() && argEntry->isPassedInRegisters()) { // We need to update the node field for this nonStandard arg here // as it may have been changed by the call to fgMorphTree. call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); flagsSummary |= argx->gtFlags; continue; } DEBUG_ARG_SLOTS_ASSERT(size != 0); DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();) if (argx->IsLocalAddrExpr() != nullptr) { argx->gtType = TYP_I_IMPL; } // Get information about this argument. var_types hfaType = argEntry->GetHfaType(); bool isHfaArg = (hfaType != TYP_UNDEF); bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters(); unsigned structSize = 0; // Struct arguments may be morphed into a node that is not a struct type. // In such case the fgArgTabEntry keeps track of whether the original node (before morphing) // was a struct and the struct classification. bool isStructArg = argEntry->isStruct; GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/); if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE)) { CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj); unsigned originalSize; if (argObj->TypeGet() == TYP_STRUCT) { if (argObj->OperIs(GT_OBJ)) { // Get the size off the OBJ node. originalSize = argObj->AsObj()->GetLayout()->GetSize(); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } else { // We have a BADCODE assert for this in fgInitArgInfo. assert(argObj->OperIs(GT_LCL_VAR)); originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize; } } else { originalSize = genTypeSize(argx); assert(originalSize == info.compCompHnd->getClassSize(objClass)); } unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE); var_types structBaseType = argEntry->argType; // First, handle the case where the argument is passed by reference. if (argEntry->passedByRef) { DEBUG_ARG_SLOTS_ASSERT(size == 1); copyBlkClass = objClass; #ifdef UNIX_AMD64_ABI assert(!"Structs are not passed by reference on x64/ux"); #endif // UNIX_AMD64_ABI } else // This is passed by value. { // Check to see if we can transform this into load of a primitive type. // 'size' must be the number of pointer sized items DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE); structSize = originalSize; unsigned passingSize = originalSize; // Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size. // When it can do this is platform-dependent: // - In general, it can be done for power of 2 structs that fit in a single register. // - For ARM and ARM64 it must also be a non-HFA struct, or have a single field. // - This is irrelevant for X86, since structs are always passed by value on the stack. 
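            // For instance, a 4-byte struct wrapping a single int that is passed in a register can be
            // reloaded as one TYP_INT indirection rather than a block OBJ (a sketch; the precise
            // conditions are computed below).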
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj); bool canTransform = false; if (structBaseType != TYP_STRUCT) { if (isPow2(passingSize)) { canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType()))); } #if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) // For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can // only transform in that case if the arg is a local. // TODO-CQ: This transformation should be applicable in general, not just for the ARM64 // or UNIX_AMD64_ABI cases where they will be passed in registers. else { canTransform = (lclVar != nullptr); passingSize = genTypeSize(structBaseType); } #endif // TARGET_ARM64 || UNIX_AMD64_ABI } if (!canTransform) { #if defined(TARGET_AMD64) #ifndef UNIX_AMD64_ABI // On Windows structs are always copied and passed by reference (handled above) unless they are // passed by value in a single register. assert(size == 1); copyBlkClass = objClass; #else // UNIX_AMD64_ABI // On Unix, structs are always passed by value. // We only need a copy if we have one of the following: // - The sizes don't match for a non-lclVar argument. // - We have a known struct type (e.g. SIMD) that requires multiple registers. // TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not // actually passed in registers. if (argEntry->isPassedInRegisters()) { if (argObj->OperIs(GT_OBJ)) { if (passingSize != structSize) { copyBlkClass = objClass; } } else if (lclVar == nullptr) { // This should only be the case of a value directly producing a known struct type. assert(argObj->TypeGet() != TYP_STRUCT); if (argEntry->numRegs > 1) { copyBlkClass = objClass; } } } #endif // UNIX_AMD64_ABI #elif defined(TARGET_ARM64) if ((passingSize != structSize) && (lclVar == nullptr)) { copyBlkClass = objClass; } #endif #ifdef TARGET_ARM // TODO-1stClassStructs: Unify these conditions across targets. if (((lclVar != nullptr) && (lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) || ((argObj->OperIs(GT_OBJ)) && (passingSize != structSize))) { copyBlkClass = objClass; } if (structSize < TARGET_POINTER_SIZE) { copyBlkClass = objClass; } #endif // TARGET_ARM } else { // We have a struct argument that fits into a register, and it is either a power of 2, // or a local. // Change our argument, as needed, into a value of the appropriate type. 
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_ARM DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2))); #else DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) && size == (genTypeSize(structBaseType) / REGSIZE_BYTES))); #endif assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize)); if (argObj->OperIs(GT_OBJ)) { argObj->ChangeOper(GT_IND); // Now see if we can fold *(&X) into X if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR) { GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1; // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE); DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(argObj); // GT_IND argObj = temp; *parentArgx = temp; argx = temp; } } if (argObj->gtOper == GT_LCL_VAR) { unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { if (varDsc->lvFieldCnt == 1) { // get the first and only promoted field LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize) { // we will use the first and only promoted field argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart); if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) && (genTypeSize(fieldVarDsc->TypeGet()) == originalSize)) { // Just use the existing field's type argObj->gtType = fieldVarDsc->TypeGet(); } else { // Can't use the existing field's type, so use GT_LCL_FLD to swizzle // to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet())); assert(copyBlkClass == NO_CLASS_HANDLE); } else { // use GT_LCL_FLD to swizzle the single field struct to a new type lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // The struct fits into a single register, but it has been promoted into its // constituent fields, and so we have to re-assemble it copyBlkClass = objClass; } } else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType)) { // Not a promoted struct, so just swizzle the type by using GT_LCL_FLD lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg)); argObj->ChangeOper(GT_LCL_FLD); argObj->gtType = structBaseType; } } else { // Not a GT_LCL_VAR, so we can just change the type on the node argObj->gtType = structBaseType; } assert(varTypeIsEnregisterable(argObj->TypeGet()) || ((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType))); } #if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) // TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in // `genPutStructArgStk` for xarch like we did it for Arm/Arm64. // We still have a struct unless we converted the GT_OBJ into a GT_IND above... if (isHfaArg && passUsingFloatRegs) { } else if (structBaseType == TYP_STRUCT) { // If the valuetype size is not a multiple of TARGET_POINTER_SIZE, // we must copyblk to a temp before doing the obj to avoid // the obj reading memory past the end of the valuetype CLANG_FORMAT_COMMENT_ANCHOR; if (roundupSize > originalSize) { copyBlkClass = objClass; // There are a few special cases where we can omit using a CopyBlk // where we normally would need to use one. 
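                        // A sketch of the reasoning: when the OBJ reads from the address of a local, the
                        // local's stack allocation is already padded to a pointer-size multiple, so reading
                        // the rounded-up size cannot fault, and no defensive copy is needed.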
if (argObj->OperIs(GT_OBJ) && argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar? { copyBlkClass = NO_CLASS_HANDLE; } } } #endif // !UNIX_AMD64_ABI } } if (argEntry->isPassedInRegisters()) { call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing); } else { call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing); } if (copyBlkClass != NO_CLASS_HANDLE) { fgMakeOutgoingStructArgCopy(call, args, copyBlkClass); } if (argx->gtOper == GT_MKREFANY) { // 'Lower' the MKREFANY tree and insert it. noway_assert(!reMorphing); #ifdef TARGET_X86 // Build the mkrefany as a GT_FIELD_LIST GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF); fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); #else // !TARGET_X86 // Get a new temp // Here we don't need unsafe value cls check since the addr of temp is used only in mkrefany unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument")); lvaSetStruct(tmp, impGetRefAnyClass(), false); // Build the mkrefany as a comma node: // (tmp.ptr=argx),(tmp.type=handle) GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr); GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type); destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); destPtrSlot->gtFlags |= GTF_VAR_DEF; destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField())); destTypeSlot->gtFlags |= GTF_VAR_DEF; GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1); GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2); GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot); // Change the expression to "(tmp=val)" args->SetNode(asg); // EvalArgsToTemps will cause tmp to actually get loaded as the argument call->fgArgInfo->EvalToTmp(argEntry, tmp, asg); lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); #endif // !TARGET_X86 } #if FEATURE_MULTIREG_ARGS if (isStructArg) { if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) || (isHfaArg && argx->TypeGet() == TYP_STRUCT)) { hasMultiregStructArgs = true; } } #ifdef TARGET_ARM else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE)) { assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2)); } #endif else { // We must have exactly one register or slot. assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) || ((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1))); } #endif #if defined(TARGET_X86) if (isStructArg) { GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx); if ((lclNode != nullptr) && (lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT)) { // Make a GT_FIELD_LIST of the field lclVars. 
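            // E.g. (hypothetical locals): struct { int a; int b; } promoted into V03/V04 becomes
            // FIELD_LIST { V03 @ 0, V04 @ 4 }, letting each field be pushed independently on x86.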
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon(); LclVarDsc* varDsc = lvaGetDesc(lcl); GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); fgArgTabEntry* fp = gtArgEntryByNode(call, argx); args->SetNode(fieldList); assert(fp->GetNode() == fieldList); for (unsigned fieldLclNum = varDsc->lvFieldLclStart; fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* fieldLcl; if (fieldLclNum == varDsc->lvFieldLclStart) { lcl->SetLclNum(fieldLclNum); lcl->SetOperResetFlags(GT_LCL_VAR); lcl->gtType = fieldVarDsc->TypeGet(); fieldLcl = lcl; } else { fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); } fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); } } } #endif // TARGET_X86 flagsSummary |= args->GetNode()->gtFlags; } // end foreach argument loop if (!reMorphing) { call->fgArgInfo->ArgsComplete(); } /* Process the function address, if indirect call */ if (call->gtCallType == CT_INDIRECT) { call->gtCallAddr = fgMorphTree(call->gtCallAddr); // Const CSE may create an assignment node here flagsSummary |= call->gtCallAddr->gtFlags; } #if FEATURE_FIXED_OUT_ARGS // Record the outgoing argument size. If the call is a fast tail // call, it will setup its arguments in incoming arg area instead // of the out-going arg area, so we don't need to track the // outgoing arg size. if (!call->IsFastTailCall()) { #if defined(UNIX_AMD64_ABI) // This is currently required for the UNIX ABI to work correctly. opts.compNeedToAlignFrame = true; #endif // UNIX_AMD64_ABI const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset()); #if defined(DEBUG_ARG_SLOTS) unsigned preallocatedArgCount = 0; if (!compMacOsArm64Abi()) { preallocatedArgCount = call->fgArgInfo->GetNextSlotNum(); assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES); } #endif call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL)); #ifdef DEBUG if (verbose) { const fgArgInfo* argInfo = call->fgArgInfo; #if defined(DEBUG_ARG_SLOTS) if (!compMacOsArm64Abi()) { printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, " "outgoingArgSpaceSize=%d\n", argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } else { printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); } #else printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(), outgoingArgSpaceSize); #endif } #endif } #endif // FEATURE_FIXED_OUT_ARGS // Clear the ASG and EXCEPT (if possible) flags on the call node call->gtFlags &= ~GTF_ASG; if (!call->OperMayThrow(this)) { call->gtFlags &= ~GTF_EXCEPT; } // Union in the side effect flags from the call's operands call->gtFlags |= flagsSummary & GTF_ALL_EFFECT; // If we are remorphing or don't have any register arguments or other arguments that need // temps, then we don't need to call SortArgs() and EvalArgsToTemps(). // if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps())) { // Do the 'defer or eval to temp' analysis. 
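        // SortArgs decides an evaluation order for the arg table; EvalArgsToTemps then introduces
        // temps where needed and builds the gtCallLateArgs list described in the notes above.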
        call->fgArgInfo->SortArgs();

        call->fgArgInfo->EvalArgsToTemps();
    }

    if (hasMultiregStructArgs)
    {
        fgMorphMultiregStructArgs(call);
    }

#ifdef DEBUG
    if (verbose)
    {
        JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
        call->fgArgInfo->Dump(this);
        JITDUMP("\n");
    }
#endif
    return call;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif

//-----------------------------------------------------------------------------
// fgMorphMultiregStructArgs:  Locate the TYP_STRUCT arguments and
//                             call fgMorphMultiregStructArg on each of them.
//
// Arguments:
//    call    :    a GenTreeCall node that has one or more TYP_STRUCT arguments.
//
// Notes:
//    We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types.
//    It will ensure that the struct arguments are in the correct form.
//    If this method fails to find any TYP_STRUCT arguments it will assert.
//
void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
{
    bool         foundStructArg = false;
    GenTreeFlags flagsSummary   = GTF_EMPTY;

#ifdef TARGET_X86
    assert(!"Logic error: no MultiregStructArgs for X86");
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
    assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI");
#endif

    for (GenTreeCall::Use& use : call->Args())
    {
        // For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
        // For such late args the gtCallArgList contains the setup arg node (evaluating the arg.)
        // The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping
        // between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself,
        // otherwise points to the list in the late args list.
        bool           isLateArg  = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0;
        fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode());
        assert(fgEntryPtr != nullptr);
        GenTree*          argx     = fgEntryPtr->GetNode();
        GenTreeCall::Use* lateUse  = nullptr;
        GenTree*          lateNode = nullptr;

        if (isLateArg)
        {
            for (GenTreeCall::Use& lateArgUse : call->LateArgs())
            {
                GenTree* argNode = lateArgUse.GetNode();
                if (argx == argNode)
                {
                    lateUse  = &lateArgUse;
                    lateNode = argNode;
                    break;
                }
            }
            assert((lateUse != nullptr) && (lateNode != nullptr));
        }

        if (!fgEntryPtr->isStruct)
        {
            continue;
        }

        unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber());
        if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT))
        {
            foundStructArg = true;
            if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST))
            {
                if (fgEntryPtr->IsHfaRegArg())
                {
                    var_types hfaType = fgEntryPtr->GetHfaType();
                    unsigned  structSize;
                    if (argx->OperIs(GT_OBJ))
                    {
                        structSize = argx->AsObj()->GetLayout()->GetSize();
                    }
                    else if (varTypeIsSIMD(argx))
                    {
                        structSize = genTypeSize(argx);
                    }
                    else
                    {
                        assert(argx->OperIs(GT_LCL_VAR));
                        structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize;
                    }
                    assert(structSize > 0);
                    if (structSize == genTypeSize(hfaType))
                    {
                        if (argx->OperIs(GT_OBJ))
                        {
                            argx->SetOper(GT_IND);
                        }

                        argx->gtType = hfaType;
                    }
                }

                GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr);

                // Did we replace 'argx' with a new tree?
                if (newArgx != argx)
                {
                    // link the new arg node into either the late arg list or the gtCallArgs list
                    if (isLateArg)
                    {
                        lateUse->SetNode(newArgx);
                    }
                    else
                    {
                        use.SetNode(newArgx);
                    }

                    assert(fgEntryPtr->GetNode() == newArgx);
                }
            }
        }
    }

    // We should only call this method when we actually have one or more multireg struct args
    assert(foundStructArg);

    // Update the flags
    call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}

//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg:  Given a TYP_STRUCT arg from a call argument list,
//     morph the argument as needed to be passed correctly.
//
// Arguments:
//     arg        - A GenTree node containing a TYP_STRUCT arg
//     fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
// Notes:
//    The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT.
//    If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the
//    stack are marked as doNotEnregister, and then we return.
//
//    If it is passed by register, we mutate the argument into the GT_FIELD_LIST form
//    which is only used for struct arguments.
//
//    If arg is a LclVar we check if it is struct promoted and has the right number of fields
//    and if they are at the appropriate offsets we will use the struct promoted fields
//    in the GT_FIELD_LIST nodes that we create.
//    If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements
//    we will use a set of GT_LCL_FLDs nodes to access the various portions of the struct;
//    this also forces the struct to be stack allocated into the local frame.
//    For the GT_OBJ case we will clone the address expression and generate two (or more)
//    indirections.
//    Currently the implementation handles ARM64/ARM and will NYI for other architectures.
//
GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr)
{
    assert(varTypeIsStruct(arg->TypeGet()));

#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI)
    NYI("fgMorphMultiregStructArg requires implementation for this target");
#endif

#ifdef TARGET_ARM
    if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) ||
        (!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK))
#else
    if (fgEntryPtr->GetRegNum() == REG_STK)
#endif
    {
        GenTreeLclVarCommon* lcl       = nullptr;
        GenTree*             actualArg = arg->gtEffectiveVal();

        if (actualArg->OperGet() == GT_OBJ)
        {
            if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR))
            {
                lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon();
            }
        }
        else if (actualArg->OperGet() == GT_LCL_VAR)
        {
            lcl = actualArg->AsLclVarCommon();
        }
        if (lcl != nullptr)
        {
            if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)
            {
                arg = fgMorphLclArgToFieldlist(lcl);
            }
            else if (arg->TypeGet() == TYP_STRUCT)
            {
                // If this is a non-register struct, it must be referenced from memory.
                if (!actualArg->OperIs(GT_OBJ))
                {
                    // Create an Obj of the temp to use it as a call argument.
                    arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
                    arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg);
                }
                // Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg)); } } return arg; } #if FEATURE_MULTIREG_ARGS // Examine 'arg' and setup argValue objClass and structSize // const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg); GenTree* argValue = arg; // normally argValue will be arg, but see right below unsigned structSize = 0; if (arg->TypeGet() != TYP_STRUCT) { structSize = genTypeSize(arg->TypeGet()); assert(structSize == info.compCompHnd->getClassSize(objClass)); } else if (arg->OperGet() == GT_OBJ) { GenTreeObj* argObj = arg->AsObj(); const ClassLayout* objLayout = argObj->GetLayout(); structSize = objLayout->GetSize(); assert(structSize == info.compCompHnd->getClassSize(objClass)); // If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR. GenTree* op1 = argObj->gtOp1; if (op1->OperGet() == GT_ADDR) { GenTree* underlyingTree = op1->AsOp()->gtOp1; // Only update to the same type. if (underlyingTree->OperIs(GT_LCL_VAR)) { const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar()); if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout)) { argValue = underlyingTree; } } } } else if (arg->OperGet() == GT_LCL_VAR) { LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon()); structSize = varDsc->lvExactSize; assert(structSize == info.compCompHnd->getClassSize(objClass)); } else { structSize = info.compCompHnd->getClassSize(objClass); } var_types hfaType = TYP_UNDEF; var_types elemType = TYP_UNDEF; unsigned elemCount = 0; unsigned elemSize = 0; var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0 hfaType = fgEntryPtr->GetHfaType(); if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters()) { elemType = hfaType; elemSize = genTypeSize(elemType); elemCount = structSize / elemSize; assert(elemSize * elemCount == structSize); for (unsigned inx = 0; inx < elemCount; inx++) { type[inx] = elemType; } } else { assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE); BYTE gcPtrs[MAX_ARG_REG_COUNT]; elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE; info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]); for (unsigned inx = 0; inx < elemCount; inx++) { #ifdef UNIX_AMD64_ABI if (gcPtrs[inx] == TYPE_GC_NONE) { type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx], fgEntryPtr->structDesc.eightByteSizes[inx]); } else #endif // UNIX_AMD64_ABI { type[inx] = getJitGCType(gcPtrs[inx]); } } #ifndef UNIX_AMD64_ABI if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { elemSize = TARGET_POINTER_SIZE; // We can safely widen this to aligned bytes since we are loading from // a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and // lives in the stack frame or will be a promoted field. 
            // structSize = elemCount * TARGET_POINTER_SIZE;
        }
        else // we must have a GT_OBJ
        {
            assert(argValue->OperGet() == GT_OBJ);

            // We need to load the struct from an arbitrary address
            // and we can't read past the end of the structSize
            // We adjust the last load type here
            //
            unsigned remainingBytes = structSize % TARGET_POINTER_SIZE;
            unsigned lastElem       = elemCount - 1;
            if (remainingBytes != 0)
            {
                switch (remainingBytes)
                {
                    case 1:
                        type[lastElem] = TYP_BYTE;
                        break;
                    case 2:
                        type[lastElem] = TYP_SHORT;
                        break;
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
                    case 4:
                        type[lastElem] = TYP_INT;
                        break;
#endif // (TARGET_ARM64) || (UNIX_AMD64_ABI)
                    default:
                        noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg");
                        break;
                }
            }
        }
#endif // !UNIX_AMD64_ABI
    }

    // We should still have a TYP_STRUCT
    assert(varTypeIsStruct(argValue->TypeGet()));

    GenTreeFieldList* newArg = nullptr;

    // Are we passing a struct LclVar?
    //
    if (argValue->OperGet() == GT_LCL_VAR)
    {
        GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
        unsigned             varNum  = varNode->GetLclNum();
        LclVarDsc*           varDsc  = lvaGetDesc(varNum);

        // At this point any TYP_STRUCT LclVar must be an aligned struct
        // or an HFA struct, both of which are passed by value.
        //
        assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa());

        varDsc->lvIsMultiRegArg = true;

#ifdef DEBUG
        if (verbose)
        {
            JITDUMP("Multireg struct argument V%02u : ", varNum);
            fgEntryPtr->Dump();
        }
#endif // DEBUG

#ifndef UNIX_AMD64_ABI
        // This local variable must match the layout of the 'objClass' type exactly
        if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
        {
            // We have a HFA struct.
            noway_assert(elemType == varDsc->GetHfaType());
            noway_assert(elemSize == genTypeSize(elemType));
            noway_assert(elemCount == (varDsc->lvExactSize / elemSize));
            noway_assert(elemSize * elemCount == varDsc->lvExactSize);

            for (unsigned inx = 0; (inx < elemCount); inx++)
            {
                noway_assert(type[inx] == elemType);
            }
        }
        else
        {
#if defined(TARGET_ARM64)
            // We must have a 16-byte struct (non-HFA)
            noway_assert(elemCount == 2);
#elif defined(TARGET_ARM)
            noway_assert(elemCount <= 4);
#endif

            for (unsigned inx = 0; inx < elemCount; inx++)
            {
                var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx);

                // We setup the type[inx] value above using the GC info from 'objClass'
                // This GT_LCL_VAR must have the same GC layout info
                //
                if (varTypeIsGC(currentGcLayoutType))
                {
                    noway_assert(type[inx] == currentGcLayoutType);
                }
                else
                {
                    // We may have used a small type when we set up the type[inx] values above
                    // We can safely widen this to TYP_I_IMPL
                    type[inx] = TYP_I_IMPL;
                }
            }
        }

        if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
        {
            bool canMorphToFieldList = true;

            for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize)
            {
                const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset);

                if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum)))
                {
                    canMorphToFieldList = false;
                    break;
                }
            }

            if (canMorphToFieldList)
            {
                newArg = fgMorphLclArgToFieldlist(varNode);
            }
        }
        else
#endif // !UNIX_AMD64_ABI
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI)
            // Is this LclVar a promoted struct with exactly 2 fields?
            if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa())
        {
            // See if we have two promoted fields that start at offset 0 and 8?
            unsigned loVarNum = lvaGetFieldLocal(varDsc, 0);
            unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE);

            // Did we find the promoted fields at the necessary offsets?
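            // E.g. (hypothetical): struct { object o; long l; } promoted into V05 (offset 0) and
            // V06 (offset 8) can be forwarded as FIELD_LIST(V05, V06) with no stack spill.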
// Did we find the promoted fields at the necessary offsets? if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM)) { LclVarDsc* loVarDsc = lvaGetDesc(loVarNum); LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum); var_types loType = loVarDsc->lvType; var_types hiType = hiVarDsc->lvType; if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) || (varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1)))) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // } else { // We can use the struct promoted field as the two arguments // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr)) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType); newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } #elif defined(TARGET_ARM) // Is this LclVar a promoted struct with exactly the same size? if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa()) { // See if we have promoted fields? unsigned varNums[4]; bool hasBadVarNum = false; for (unsigned inx = 0; inx < elemCount; inx++) { varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx); if (varNums[inx] == BAD_VAR_NUM) { hasBadVarNum = true; break; } } // Did we find the promoted fields at the necessary offsets? if (!hasBadVarNum) { LclVarDsc* varDscs[4]; var_types varType[4]; bool varIsFloat = false; for (unsigned inx = 0; inx < elemCount; inx++) { varDscs[inx] = lvaGetDesc(varNums[inx]); varType[inx] = varDscs[inx]->lvType; if (varTypeIsFloating(varType[inx])) { // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the // integer // registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered) // JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n", varNum); // // we call lvaSetVarDoNotEnregister and do the proper transformation below. // varIsFloat = true; break; } } if (!varIsFloat) { newArg = fgMorphLclArgToFieldlist(varNode); } } } else { // // We will create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } #endif // TARGET_ARM } // If we didn't set newArg to a new List Node tree // if (newArg == nullptr) { if (fgEntryPtr->GetRegNum() == REG_STK) { // We leave this stack passed argument alone return arg; }
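// Illustrative sketch, not from the original sources: when the GT_LCL_FLD fallback below is taken for a 16-byte struct in V01, the result is FIELD_LIST { LCL_FLD V01 [+0], LCL_FLD V01 [+8] } and V01 is marked DoNotEnregister so its fields stay addressable on the stack.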
// Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted)? // A GT_LCL_FLD could also contain a 16-byte struct or HFA struct inside it? // if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR)) { GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(varNum); unsigned baseOffset = varNode->GetLclOffs(); unsigned lastOffset = baseOffset + structSize; // The allocated size of our LocalVar must be at least as big as lastOffset assert(varDsc->lvSize() >= lastOffset); if (varDsc->HasGCPtr()) { // alignment of the baseOffset is required noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0); #ifndef UNIX_AMD64_ABI noway_assert(elemSize == TARGET_POINTER_SIZE); #endif unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE; ClassLayout* layout = varDsc->GetLayout(); for (unsigned inx = 0; (inx < elemCount); inx++) { // The GC information must match what we set up using 'objClass' if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx])) { noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx)); } } } else // this varDsc contains no GC pointers { for (unsigned inx = 0; inx < elemCount; inx++) { // The GC information must match what we set up using 'objClass' noway_assert(!varTypeIsGC(type[inx])); } } // // We create a list of GT_LCL_FLDs nodes to pass this struct // lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); // Create a new tree for 'arg' // replace the existing LDOBJ(ADDR(LCLVAR)) // with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI) // unsigned offset = baseOffset; newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset); newArg->AddField(this, nextLclFld, offset, type[inx]); offset += genTypeSize(type[inx]); } } // Are we passing a GT_OBJ struct? // else if (argValue->OperGet() == GT_OBJ) { GenTreeObj* argObj = argValue->AsObj(); GenTree* baseAddr = argObj->gtOp1; var_types addrType = baseAddr->TypeGet(); if (baseAddr->OperGet() == GT_ADDR) { GenTree* addrTaken = baseAddr->AsOp()->gtOp1; if (addrTaken->IsLocal()) { GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon(); unsigned varNum = varNode->GetLclNum(); // We access non-struct type (for example, long) as a struct type. // Make sure lclVar lives on stack to make sure its fields are accessible by address. lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField)); } } // Create a new tree for 'arg' // replace the existing LDOBJ(EXPR) // with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...) // newArg = new (this, GT_FIELD_LIST) GenTreeFieldList(); unsigned offset = 0; for (unsigned inx = 0; inx < elemCount; inx++) { GenTree* curAddr = baseAddr; if (offset != 0) { GenTree* baseAddrDup = gtCloneExpr(baseAddr); noway_assert(baseAddrDup != nullptr); curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL)); } else { curAddr = baseAddr; } GenTree* curItem = gtNewIndir(type[inx], curAddr); // For safety all GT_IND should have at least GT_GLOB_REF set.
curItem->gtFlags |= GTF_GLOB_REF; newArg->AddField(this, curItem, offset, type[inx]); offset += genTypeSize(type[inx]); } } } #ifdef DEBUG // If we reach here we should have set newArg to something if (newArg == nullptr) { gtDispTree(argValue); assert(!"Missing case in fgMorphMultiregStructArg"); } #endif noway_assert(newArg != nullptr); #ifdef DEBUG if (verbose) { printf("fgMorphMultiregStructArg created tree:\n"); gtDispTree(newArg); } #endif arg = newArg; // consider calling fgMorphTree(newArg); #endif // FEATURE_MULTIREG_ARGS return arg; } //------------------------------------------------------------------------ // fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields // // Arguments: // lcl - The GT_LCL_VAR node we will transform // // Return value: // The new GT_FIELD_LIST that we have created. // GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl) { LclVarDsc* varDsc = lvaGetDesc(lcl); assert(varDsc->lvPromoted); unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclNum = varDsc->lvFieldLclStart; GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList(); for (unsigned i = 0; i < fieldCount; i++) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet()); fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet()); fieldLclNum++; } return fieldList; } //------------------------------------------------------------------------ // fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary, // to pass to a callee. // // Arguments: // call - call being processed // args - args for the call // copyBlkClass - class handle for the struct // // The arg is updated if necessary with the copy. // void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass) { GenTree* argx = args->GetNode(); noway_assert(argx->gtOper != GT_MKREFANY); fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx); // If we're optimizing, see if we can avoid making a copy. // // We don't need a copy if this is the last use of an implicit by-ref local. // if (opts.OptimizationEnabled()) { GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { const unsigned varNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(varNum); const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY); // We don't have liveness so we rely on other indications of last use. // // We handle these cases: // // * (must not copy) If the call is a tail call, the use is a last use. // We must skip the copy if we have a fast tail call. // // * (may not copy) if the call is noreturn, the use is a last use. // We also check for just one reference here as we are not doing // alias analysis of the call's parameters, or checking if the call // site is not within some try region. // // * (may not copy) if there is exactly one use of the local in the method, // and the call is not in loop, this is a last use. // // fgMightHaveLoop() is expensive; check it last, only if necessary. 
// if (call->IsTailCall() || // ((totalAppearances == 1) && call->IsNoReturn()) || // ((totalAppearances == 1) && !fgMightHaveLoop())) { args->SetNode(lcl); assert(argEntry->GetNode() == lcl); JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum); return; } } } JITDUMP("making an outgoing copy for struct arg\n"); if (fgOutgoingArgTemps == nullptr) { fgOutgoingArgTemps = hashBv::Create(this); } unsigned tmp = 0; bool found = false; // Attempt to find a local we have already used for an outgoing struct and reuse it. // We do not reuse within a statement. if (!opts.MinOpts()) { indexType lclNum; FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps) { LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum); if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) && !fgCurrentlyInUseArgTemps->testBit(lclNum)) { tmp = (unsigned)lclNum; found = true; JITDUMP("reusing outgoing struct arg"); break; } } NEXT_HBV_BIT_SET; } // Create the CopyBlk tree and insert it. if (!found) { // Get a new temp // Here we don't need unsafe value cls check, since the addr of this temp is used only in copyblk. tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument")); lvaSetStruct(tmp, copyBlkClass, false); if (call->IsVarargs()) { lvaSetStructUsedAsVarArg(tmp); } fgOutgoingArgTemps->setBit(tmp); } fgCurrentlyInUseArgTemps->setBit(tmp); // TYP_SIMD structs should not be enregistered, since ABI requires it to be // allocated on stack and address of it needs to be passed. if (lclVarIsSIMDType(tmp)) { // TODO: check if we need this block here or other parts already deal with it. lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg)); } // Create a reference to the temp GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType); dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction. // Copy the valuetype to the temp GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */); copyBlk = fgMorphCopyBlock(copyBlk); #if FEATURE_FIXED_OUT_ARGS // Do the copy early, and evaluate the temp later (see EvalArgsToTemps) // When on Unix create LCL_FLD for structs passed in more than one register. See fgMakeTmpArgNode GenTree* arg = copyBlk; #else // FEATURE_FIXED_OUT_ARGS // Structs are always on the stack, and thus never need temps // so we have to put the copy and temp all into one expression. argEntry->tmpNum = tmp; GenTree* arg = fgMakeTmpArgNode(argEntry); // Change the expression to "(tmp=val),tmp" arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg); #endif // FEATURE_FIXED_OUT_ARGS args->SetNode(arg); call->fgArgInfo->EvalToTmp(argEntry, tmp, arg); } #ifdef TARGET_ARM // See declaration for specification comment. void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc, unsigned firstArgRegNum, regMaskTP* pArgSkippedRegMask) { assert(varDsc->lvPromoted); // There's no way to do these calculations without breaking abstraction and assuming that // integer register arguments are consecutive ints. They are on ARM. // To start, figure out what register contains the last byte of the first argument. LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart); unsigned lastFldRegOfLastByte = (firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
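// Illustrative example, not from the original sources: on ARM, struct { int a; double b; } passed starting at r0 puts 'a' in r0 and the 8-byte-aligned 'b' in r2:r3, so the loop below records the skipped r1 in *pArgSkippedRegMask.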
// Now we're keeping track of the register that the last field ended in; see what registers // subsequent fields start in, and whether any are skipped. // (We assume here the invariant that the fields are sorted in offset order.) for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++) { unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset; LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum); unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE; assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields. // This loop enumerates the offsets of any registers skipped: it starts at the first register after // the one that held the last byte of the previous field, and stops at the first register of the current field. for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset; skippedRegOffsets++) { // If the register number would not be an arg reg, we're done. if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG) return; *pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets)); } lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE; } } #endif // TARGET_ARM /***************************************************************************** * * A little helper used to rearrange nested commutative operations. The * effect is that nested associative, commutative operations are transformed * into a 'left-deep' tree, i.e. into something like this: * * (((a op b) op c) op d) op... */ #if REARRANGE_ADDS void Compiler::fgMoveOpsLeft(GenTree* tree) { GenTree* op1; GenTree* op2; genTreeOps oper; do { op1 = tree->AsOp()->gtOp1; op2 = tree->AsOp()->gtOp2; oper = tree->OperGet(); noway_assert(GenTree::OperIsCommutative(oper)); noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL); noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder); noway_assert(oper == op2->gtOper); // Commutativity doesn't hold if overflow checks are needed if (tree->gtOverflowEx() || op2->gtOverflowEx()) { return; } if (gtIsActiveCSE_Candidate(op2)) { // If we have marked op2 as a CSE candidate, // we can't perform a commutative reordering // because any value numbers that we computed for op2 // will be incorrect after performing a commutative reordering // return; } if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT)) { return; } // Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0)) { return; } if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN) { // We could deal with this, but we were always broken and just hit the assert // below regarding flags, which means it's not frequent, so will just bail out. // See #195514 return; } noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx()); GenTree* ad1 = op2->AsOp()->gtOp1; GenTree* ad2 = op2->AsOp()->gtOp2; // Compiler::optOptimizeBools() can create GT_OR of two GC pointers yielding a GT_INT // We cannot reorder such GT_OR trees // if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet())) { break; }
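// Illustrative walk-through, not from the original sources: ADD(a, ADD(b, ADD(c, d))) becomes ADD(ADD(a, b), ADD(c, d)) after the rotation below, and the loop then continues to produce the left-deep ADD(ADD(ADD(a, b), c), d) shape shown in the header comment, letting constant operands drift together so gtFoldExpr can fold them.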
// Don't split up a byref calculation and create a new byref. E.g., // [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int). // Doing this transformation could create a situation where the first // addition (that is, [byref]+ (ref, int) ) creates a byref pointer that // no longer points within the ref object. If a GC happens, the byref won't // get updated. This can happen, for instance, if one of the int components // is negative. It also requires the address generation to be in a fully-interruptible // code region. // if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL) { assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD)); break; } /* Change "(x op (y op z))" to "(x op y) op z" */ /* i.e. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */ GenTree* new_op1 = op2; new_op1->AsOp()->gtOp1 = op1; new_op1->AsOp()->gtOp2 = ad1; /* Change the flags. */ // Make sure we aren't throwing away any flags noway_assert((new_op1->gtFlags & ~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag. GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0); new_op1->gtFlags = (new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag. (op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT); /* Retype new_op1 if it has now become (or is no longer) a GC ptr. */ if (varTypeIsGC(op1->TypeGet())) { noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_ADD) || // byref(ref + (int+int)) (varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL && oper == GT_OR)); // int(gcref | int(gcref|intval)) new_op1->gtType = tree->gtType; } else if (varTypeIsGC(ad2->TypeGet())) { // Neither ad1 nor op1 are GC. So new_op1 isn't either noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL); new_op1->gtType = TYP_I_IMPL; } // If new_op1 is a new expression, assign it a new unique value number. // vnStore is null before the ValueNumber phase has run if (vnStore != nullptr) { // We can only keep the old value number on new_op1 if both op1 and ad2 // have the same non-NoVN value numbers. Since op is commutative, comparing // only ad2 and op1 is enough. if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) || (ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal())) { new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet())); } } tree->AsOp()->gtOp1 = new_op1; tree->AsOp()->gtOp2 = ad2; /* If 'new_op1' is now the same nested op, process it recursively */ if ((ad1->gtOper == oper) && !ad1->gtOverflowEx()) { fgMoveOpsLeft(new_op1); } /* If 'ad2' is now the same nested op, process it * Instead of recursion, we set up op1 and op2 for the next loop.
*/ op1 = new_op1; op2 = ad2; } while ((op2->gtOper == oper) && !op2->gtOverflowEx()); return; } #endif /*****************************************************************************/ void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay) { if (tree->OperIs(GT_BOUNDS_CHECK)) { GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk(); BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay); if (failBlock != nullptr) { boundsChk->gtIndRngFailBB = failBlock; } } else if (tree->OperIs(GT_INDEX_ADDR)) { GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr(); BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); if (failBlock != nullptr) { indexAddr->gtIndRngFailBB = failBlock; } } else { noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX)); fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay); } } BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay) { if (opts.MinOpts()) { delay = false; } if (!opts.compDbgCode) { if (!delay && !compIsForInlining()) { // Create/find the appropriate "range-fail" label return fgRngChkTarget(compCurBB, kind); } } return nullptr; } /***************************************************************************** * * Expand a GT_INDEX node and fully morph the child operands * * The original GT_INDEX node is bashed into the GT_IND node that accesses * the array element. We expand the GT_INDEX node into a larger tree that * evaluates the array base and index. The simplest expansion is a GT_COMMA * with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag. * For complex array or index expressions one or more GT_COMMA assignments * are inserted so that we only evaluate the array or index expressions once. * * The fully expanded tree is then morphed. This causes gtFoldExpr to * perform local constant prop and reorder the constants in the tree and * fold them. * * We then parse the resulting array element expression in order to locate * and label the constants and variables that occur in the tree. */ const int MAX_ARR_COMPLEXITY = 4; const int MAX_INDEX_COMPLEXITY = 4; GenTree* Compiler::fgMorphArrayIndex(GenTree* tree) { noway_assert(tree->gtOper == GT_INDEX); GenTreeIndex* asIndex = tree->AsIndex(); var_types elemTyp = asIndex->TypeGet(); unsigned elemSize = asIndex->gtIndElemSize; CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass; noway_assert(elemTyp != TYP_STRUCT || elemStructType != nullptr); // Fold "cns_str"[cns_index] to ushort constant // NOTE: don't do it for empty string, the operation will fail anyway if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) && !asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32()) { const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue()); if (cnsIndex >= 0) { int length; const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd, asIndex->Arr()->AsStrCon()->gtSconCPX, &length); if ((cnsIndex < length) && (str != nullptr)) { GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT); INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return cnsCharNode; } } } #ifdef FEATURE_SIMD if (supportSIMDTypes() && varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize)) { // If this is a SIMD type, this is the point at which we lose the type information, // so we need to set the correct type on the GT_IND. // (We don't care about the base type here, so we only check, but don't retain, the return value).
unsigned simdElemSize = 0; if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF) { assert(simdElemSize == elemSize); elemTyp = getSIMDTypeForSize(elemSize); // This is the new type of the node. tree->gtType = elemTyp; // Now set elemStructType to null so that we don't confuse value numbering. elemStructType = nullptr; } } #endif // FEATURE_SIMD // Set up the array length's offset into lenOffs // And the first element's offset into elemOffs ssize_t lenOffs; ssize_t elemOffs; if (tree->gtFlags & GTF_INX_STRING_LAYOUT) { lenOffs = OFFSETOF__CORINFO_String__stringLen; elemOffs = OFFSETOF__CORINFO_String__chars; tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE } else { // We have a standard array lenOffs = OFFSETOF__CORINFO_Array__length; elemOffs = OFFSETOF__CORINFO_Array__data; } // In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts // compilation time is roughly proportional to the size of the IR, this helps keep compilation times down. // Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion // performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in // minopts). // // When we *are* optimizing, we fully expand GT_INDEX to: // 1. Evaluate the array address expression and store the result in a temp if the expression is complex or // side-effecting. // 2. Evaluate the array index expression and store the result in a temp if the expression is complex or // side-effecting. // 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array)) // 4. Compute the address of the element that will be accessed: // GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize)) // 5. Dereference the address with a GT_IND. // // This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows // for more straightforward bounds-check removal, CSE, etc. if (opts.MinOpts()) { GenTree* const array = fgMorphTree(asIndex->Arr()); GenTree* const index = fgMorphTree(asIndex->Index()); GenTreeIndexAddr* const indexAddr = new (this, GT_INDEX_ADDR) GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize, static_cast<unsigned>(lenOffs), static_cast<unsigned>(elemOffs)); indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT; // Mark the indirection node as needing a range check if necessary. 
// Note this will always be true unless JitSkipArrayBoundCheck() is used if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0) { fgSetRngChkTarget(indexAddr); } if (!tree->TypeIs(TYP_STRUCT)) { tree->ChangeOper(GT_IND); } else { DEBUG_DESTROY_NODE(tree); tree = gtNewObjNode(elemStructType, indexAddr); INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); } GenTreeIndir* const indir = tree->AsIndir(); indir->Addr() = indexAddr; bool canCSE = indir->CanCSE(); indir->gtFlags = GTF_IND_ARR_INDEX | (indexAddr->gtFlags & GTF_ALL_EFFECT); if (!canCSE) { indir->SetDoNotCSE(); } INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return indir; } GenTree* arrRef = asIndex->Arr(); GenTree* index = asIndex->Index(); bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0); GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression GenTree* bndsChk = nullptr; // If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address. if (chkd) { GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression GenTree* index2 = nullptr; // If the arrRef or index expressions involve an assignment, a call or reads from global memory, // then we *must* allocate a temporary in which to "localize" those values, to ensure that the // same values are used in the bounds check and the actual dereference. // Also we allocate the temporary when the expression is sufficiently complex/expensive. // // Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is // not exposed. Without that condition there are cases of local struct fields that were previously, // needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that // were mostly ameliorated by adding this condition. // // Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created // after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is // perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to // do this here. if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr")); arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef); arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet()); } else { arrRef2 = gtCloneExpr(arrRef); noway_assert(arrRef2 != nullptr); } if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) || index->OperIs(GT_FIELD, GT_LCL_FLD)) { unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr")); indexDefn = gtNewTempAssign(indexTmpNum, index); index = gtNewLclvNode(indexTmpNum, index->TypeGet()); index2 = gtNewLclvNode(indexTmpNum, index->TypeGet()); } else { index2 = gtCloneExpr(index); noway_assert(index2 != nullptr); } // Next introduce a GT_BOUNDS_CHECK node var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
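// Illustrative shape, not from the original sources: when both temps are needed for "a[i]", the final morphed tree built below is COMMA(ASG(tmpArr, a), COMMA(ASG(tmpIdx, i), COMMA(BOUNDS_CHECK(tmpIdx, ARR_LENGTH(tmpArr)), IND(ADD(tmpArr, ADD(MUL(tmpIdx, elemSize), elemOffs))))))), where tmpArr and tmpIdx are hypothetical temp locals.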
#ifdef TARGET_64BIT // The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case // of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case, // the comparison will have to be widened to 64 bits. if (index->TypeGet() == TYP_I_IMPL) { bndsChkType = TYP_I_IMPL; } #endif // TARGET_64BIT GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB); if (bndsChkType != TYP_INT) { arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType); } GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL); bndsChk = arrBndsChk; // Now we'll switch to using the second copies for arrRef and index // to compute the address expression arrRef = arrRef2; index = index2; } // Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))" GenTree* addr; #ifdef TARGET_64BIT // Widen 'index' on 64-bit targets if (index->TypeGet() != TYP_I_IMPL) { if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } } #endif // TARGET_64BIT /* Scale the index value if necessary */ if (elemSize > 1) { GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL); // Fix 392756 WP7 Crossgen // // During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node // is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar. // Hence to prevent the constant from becoming a CSE we mark it as NO_CSE. // size->gtFlags |= GTF_DONT_CSE; /* Multiply by the array element size */ addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size); } else { addr = index; } // Be careful to only create the byref pointer when the full index expression is added to the array reference. // We don't want to create a partial byref address expression that doesn't include the full index offset: // a byref must point within the containing object. It is dangerous (especially when optimizations come into // play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that // the partial byref will not point within the object, and thus not get updated correctly during a GC. // This is mostly a risk in fully-interruptible code regions. // We can generate two types of trees for "addr": // // 1) "arrRef + (index + elemOffset)" // 2) "(arrRef + elemOffset) + index" // // XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1), // while for Arm it is better to try to make an invariant sub-tree as large as possible, which is usually // "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen. // 2) should still be safe from GC's point of view since both ADD operations are byref and point to // within the object so GC will be able to correctly track and update them. bool groupArrayRefWithElemOffset = false; #ifdef TARGET_ARMARCH groupArrayRefWithElemOffset = true;
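// Illustrative contrast, not from the original sources: shape 2) produces ADD(ADD(arrRef, elemOffset), MUL(index, elemSize)) whose invariant ADD(arrRef, elemOffset) sub-tree can be hoisted/CSEd, while shape 1) produces ADD(arrRef, ADD(MUL(index, elemSize), elemOffset)) which maps onto x64 [base + index*scale + offset] addressing.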
// TODO: in some cases even on ARM the 1) shape is better because if "index" is invariant and "arrRef" is not // we will at least be able to hoist/CSE "index + elemOffset" in some cases. // See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497 // Use 2) form only for primitive types for now - it significantly reduced the number of size regressions if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp)) { groupArrayRefWithElemOffset = false; } #endif // First element's offset GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL); if (groupArrayRefWithElemOffset) { GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr); } else { addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset); addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr); } assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_LARGE) != 0) || (GenTree::s_gtNodeSizes[GT_IND] == TREE_NODE_SZ_SMALL)); // Change the original GT_INDEX node into a GT_IND node tree->SetOper(GT_IND); // If the index node is a floating-point type, notify the compiler // we'll potentially use floating point registers at the time of codegen. if (varTypeUsesFloatReg(tree->gtType)) { this->compFloatingPointUsed = true; } // We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node // is no longer a GT_INDEX node. tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT); tree->AsOp()->gtOp1 = addr; // This is an array index expression. tree->gtFlags |= GTF_IND_ARR_INDEX; // If there's a bounds check, the indir won't fault. if (bndsChk || indexNonFaulting) { tree->gtFlags |= GTF_IND_NONFAULTING; } else { tree->gtFlags |= GTF_EXCEPT; } if (nCSE) { tree->gtFlags |= GTF_DONT_CSE; } // Store information about it. GetArrayInfoMap()->Set(tree, ArrayInfo(elemTyp, elemSize, (int)elemOffs, elemStructType)); // Remember this 'indTree' that we just created, as we still need to attach the fieldSeq information to it. GenTree* indTree = tree; // Did we create a bndsChk tree? if (bndsChk) { // Use a GT_COMMA node to prepend the array bound check // tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree); /* Mark the indirection node as needing a range check */ fgSetRngChkTarget(bndsChk); } if (indexDefn != nullptr) { // Use a GT_COMMA node to prepend the index assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree); } if (arrRefDefn != nullptr) { // Use a GT_COMMA node to prepend the arrRef assignment // tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree); } JITDUMP("fgMorphArrayIndex (before remorph):\n") DISPTREE(tree) // Currently we morph the tree to perform some folding operations prior // to attaching fieldSeq info and labeling constant array index contributions // tree = fgMorphTree(tree); JITDUMP("fgMorphArrayIndex (after remorph):\n") DISPTREE(tree) // Ideally we just want to proceed to attaching fieldSeq info and labeling the // constant array index contributions, but the morphing operation may have changed // the 'tree' into something that now unconditionally throws an exception. // // In such a case the gtEffectiveVal could be a new tree or its gtOper could be modified // or it could be left unchanged. If it is unchanged then we should not return, // instead we should proceed to attaching fieldSeq info, etc... // GenTree* arrElem = tree->gtEffectiveVal(); if (fgIsCommaThrow(tree)) { if ((arrElem != indTree) || // A new tree node may have been created (!indTree->OperIs(GT_IND))) // The GT_IND may have been changed to a GT_CNS_INT { return tree; // Just return the Comma-Throw, don't try to attach the fieldSeq info, etc..
} } assert(!fgGlobalMorph || (arrElem->gtDebugFlags & GTF_DEBUG_NODE_MORPHED)); DBEXEC(fgGlobalMorph && (arrElem == tree), tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED) addr = arrElem->gtGetOp1(); GenTree* cnsOff = nullptr; if (addr->OperIs(GT_ADD)) { GenTree* addrOp1 = addr->gtGetOp1(); if (groupArrayRefWithElemOffset) { if (addrOp1->OperIs(GT_ADD) && addrOp1->gtGetOp2()->IsCnsIntOrI()) { assert(addrOp1->gtGetOp1()->TypeIs(TYP_REF)); cnsOff = addrOp1->gtGetOp2(); addr = addr->gtGetOp2(); // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } else { assert(addr->gtGetOp2()->IsCnsIntOrI()); cnsOff = addr->gtGetOp2(); addr = nullptr; } } else { assert(addr->TypeIs(TYP_BYREF)); assert(addr->gtGetOp1()->TypeIs(TYP_REF)); addr = addr->gtGetOp2(); // Look for the constant [#FirstElem] node here, or as the RHS of an ADD. if (addr->IsCnsIntOrI()) { cnsOff = addr; addr = nullptr; } else { if ((addr->OperIs(GT_ADD)) && addr->gtGetOp2()->IsCnsIntOrI()) { cnsOff = addr->gtGetOp2(); addr = addr->gtGetOp1(); } // Label any constant array index contributions with #ConstantIndex and any LclVars with // GTF_VAR_ARR_INDEX addr->LabelIndex(this); } } } else if (addr->IsCnsIntOrI()) { cnsOff = addr; } FieldSeqNode* firstElemFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField); if ((cnsOff != nullptr) && (cnsOff->AsIntCon()->gtIconVal == elemOffs)) { // Assign it the [#FirstElem] field sequence // cnsOff->AsIntCon()->gtFieldSeq = firstElemFseq; } else // We have folded the first element's offset with the index expression { // Build the [#ConstantIndex, #FirstElem] field sequence // FieldSeqNode* constantIndexFseq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); FieldSeqNode* fieldSeq = GetFieldSeqStore()->Append(constantIndexFseq, firstElemFseq); if (cnsOff == nullptr) // It must have folded into a zero offset { // Record in the general zero-offset map. fgAddFieldSeqForZeroOffset(addr, fieldSeq); } else { cnsOff->AsIntCon()->gtFieldSeq = fieldSeq; } } return tree; } #ifdef TARGET_X86 /***************************************************************************** * * Wrap fixed stack arguments for varargs functions to go through varargs * cookie to access them, except for the cookie itself. * * Non-x86 platforms are allowed to access all arguments directly * so we don't need this code. 
* */ GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs) { /* For the fixed stack arguments of a varargs function, we need to go through the varargs cookies to access them, except for the cookie itself */ LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg) { // Create a node representing the local pointing to the base of the args GenTree* ptrArg = gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL), gtNewIconNode(varDsc->GetStackOffset() - codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs)); // Access the argument through the local GenTree* tree; if (varTypeIsStruct(varType)) { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); assert(typeHnd != nullptr); tree = gtNewObjNode(typeHnd, ptrArg); } else { tree = gtNewOperNode(GT_IND, varType, ptrArg); } tree->gtFlags |= GTF_IND_TGTANYWHERE; if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } return fgMorphTree(tree); } return NULL; } #endif /***************************************************************************** * * Transform the given GT_LCL_VAR tree for code generation. */ GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph) { assert(tree->gtOper == GT_LCL_VAR); unsigned lclNum = tree->AsLclVarCommon()->GetLclNum(); var_types varType = lvaGetRealType(lclNum); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 /* If not during the global morphing phase bail */ if (!fgGlobalMorph && !forceRemorph) { return tree; } bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0; noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr if (!varAddr && varDsc->lvNormalizeOnLoad()) { // TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL. // Now it does, but this leads to some regressions because we lose the uniform VNs for trees // that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created // here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)). // This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer. // This quirk preserves the previous behavior. // TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk. bool isBoolQuirk = varType == TYP_BOOL; // Assertion prop can tell us to omit adding a cast here. This is // useful when the local is a small-typed parameter that is passed in a // register: in that case, the ABI specifies that the upper bits might // be invalid, but the assertion guarantees us that we have normalized // when we wrote it. if (optLocalAssertionProp && !isBoolQuirk && optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX) { // The previous assertion can guarantee us that if this node gets // assigned a register, it will be normalized already. It is still // possible that this node ends up being in memory, in which case // normalization will still be needed, so we better have the right // type. 
assert(tree->TypeGet() == varDsc->TypeGet()); return tree; } // Small-typed arguments and aliased locals are normalized on load. // Other small-typed locals are normalized on store. // Also, under the debugger as the debugger could write to the variable. // If this is one of the former, insert a narrowing cast on the load. // i.e. Convert: var-short --> cast-short(var-int) tree->gtType = TYP_INT; fgMorphTreeDone(tree); tree = gtNewCastNode(TYP_INT, tree, false, varType); fgMorphTreeDone(tree); return tree; } return tree; } /***************************************************************************** Grab a temp for big offset morphing. This method will grab a new temp if no temp of this "type" has been created. Or it will return the same cached one if it has been created. */ unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type) { unsigned lclNum = fgBigOffsetMorphingTemps[type]; if (lclNum == BAD_VAR_NUM) { // We haven't created a temp for this kind of type. Create one now. lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing")); fgBigOffsetMorphingTemps[type] = lclNum; } else { // We better get the right type. noway_assert(lvaTable[lclNum].TypeGet() == type); } noway_assert(lclNum != BAD_VAR_NUM); return lclNum; } /***************************************************************************** * * Transform the given GT_FIELD tree for code generation. */ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac) { assert(tree->gtOper == GT_FIELD); CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd; unsigned fldOffset = tree->AsField()->gtFldOffset; GenTree* objRef = tree->AsField()->GetFldObj(); bool fieldMayOverlap = false; bool objIsLocal = false; if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR)) { // Make sure we've checked if 'objRef' is an address of an implicit-byref parameter. // If it is, fgMorphImplicitByRefArgs may change it to a different opcode, which the // simd field rewrites are sensitive to. fgMorphImplicitByRefArgs(objRef); } noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) || ((tree->gtFlags & GTF_GLOB_REF) != 0)); if (tree->AsField()->gtFldMayOverlap) { fieldMayOverlap = true; // Reset the flag because we may reuse the node. tree->AsField()->gtFldMayOverlap = false; } #ifdef FEATURE_SIMD // if this field belongs to a simd struct, translate it to a simd intrinsic. if (mac == nullptr) { if (IsBaselineSimdIsaSupported()) { GenTree* newTree = fgMorphFieldToSimdGetElement(tree); if (newTree != tree) { newTree = fgMorphTree(newTree); return newTree; } } } else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1())) { GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr(); if (lcl != nullptr) { lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField)); } } #endif // Create a default MorphAddrContext early so it doesn't go out of scope // before it is used. MorphAddrContext defMAC(MACK_Ind); /* Is this an instance data member?
*/ if (objRef) { GenTree* addr; objIsLocal = objRef->IsLocal(); if (tree->gtFlags & GTF_IND_TLS_REF) { NO_WAY("instance field can not be a TLS ref."); } /* We'll create the expression "*(objRef + mem_offs)" */ noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL); /* Now we have a tree like this: +--------------------+ | GT_FIELD | tree +----------+---------+ | +--------------+-------------+ |tree->AsField()->GetFldObj()| +--------------+-------------+ We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +---------+----------+ | | +---------+----------+ | GT_ADD | addr +---------+----------+ | / \ / \ / \ +-------------------+ +----------------------+ | objRef | | fldOffset | | | | (when fldOffset !=0) | +-------------------+ +----------------------+ or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT): +--------------------+ | GT_IND/GT_OBJ | tree +----------+---------+ | +----------+---------+ | GT_COMMA | comma2 +----------+---------+ | / \ / \ / \ / \ +---------+----------+ +---------+----------+ comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr +---------+----------+ +---------+----------+ | | / \ / \ / \ / \ / \ / \ +-----+-----+ +-----+-----+ +---------+ +-----------+ asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset | +-----+-----+ +-----+-----+ +---------+ +-----------+ | | / \ | / \ | / \ | +-----+-----+ +-----+-----+ +-----------+ | tmpLcl | | objRef | | tmpLcl | +-----------+ +-----------+ +-----------+ */ var_types objRefType = objRef->TypeGet(); GenTree* comma = nullptr; // NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field, // and thus is equivalent to a MACK_Ind with zero offset. if (mac == nullptr) { mac = &defMAC; } // This flag is set to enable the "conservative" style of explicit null-check insertion. // This means that we insert an explicit null check whenever we create a byref by adding a // constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately // dereferenced). The alternative is "aggressive", which would not insert such checks (for // small offsets); in this plan, we would transfer some null-checking responsibility to // callees of methods taking byref parameters. They would have to add explicit null checks // when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in // contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too // large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null // checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs. // This is left here to point out how to implement it. CLANG_FORMAT_COMMENT_ANCHOR; #define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1 bool addExplicitNullCheck = false; // Implicit byref locals and string literals are never null. if (fgAddrCouldBeNull(objRef)) { // If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression // whose address is being taken is either a local or static variable, whose address is necessarily // non-null, or else it is a field dereference, which will do its own bounds checking if necessary.
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind)) { if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset)) { addExplicitNullCheck = true; } else { // In R2R mode the field offset for some fields may change when the code // is loaded. So we can't rely on a zero offset here to suppress the null check. // // See GitHub issue #16454. bool fieldHasChangeableOffset = false; #ifdef FEATURE_READYTORUN fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr); #endif #if CONSERVATIVE_NULL_CHECK_BYREF_CREATION addExplicitNullCheck = (mac->m_kind == MACK_Addr) && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset); #else addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr && ((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset)); #endif } } } if (addExplicitNullCheck) { #ifdef DEBUG if (verbose) { printf("Before explicit null check morphing:\n"); gtDispTree(tree); } #endif // // Create the "comma" subtree // GenTree* asg = nullptr; GenTree* nullchk; unsigned lclNum; if (objRef->gtOper != GT_LCL_VAR) { lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet())); // Create the "asg" node asg = gtNewTempAssign(lclNum, objRef); } else { lclNum = objRef->AsLclVarCommon()->GetLclNum(); } GenTree* lclVar = gtNewLclvNode(lclNum, objRefType); nullchk = gtNewNullCheck(lclVar, compCurBB); nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections if (asg) { // Create the "comma" node. comma = gtNewOperNode(GT_COMMA, TYP_VOID, // We don't want to return anything from this "comma" node. // Set the type to TYP_VOID, so we can select "cmp" instruction // instead of "mov" instruction later on. asg, nullchk); } else { comma = nullchk; } addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node. } else { addr = objRef; } #ifdef FEATURE_READYTORUN if (tree->AsField()->gtFieldLookup.addr != nullptr) { GenTree* offsetNode = nullptr; if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE) { offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr, GTF_ICON_CONST_PTR, true); #ifdef DEBUG offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd; #endif } else { noway_assert(!"unexpected accessType for R2R field access"); } var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF; addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode); } #endif if (fldOffset != 0) { // Generate the "addr" node. /* Add the member offset to the object's address */ FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); addr = gtNewOperNode(GT_ADD, (var_types)(objRefType == TYP_I_IMPL ? TYP_I_IMPL : TYP_BYREF), addr, gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq)); } // Now let's set the "tree" as a GT_IND tree. tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; tree->SetIndirExceptionFlags(this); if (addExplicitNullCheck) { // // Create "comma2" node and link it to "tree". // GenTree* comma2; comma2 = gtNewOperNode(GT_COMMA, addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node. 
comma, addr); tree->AsOp()->gtOp1 = comma2; } #ifdef DEBUG if (verbose) { if (addExplicitNullCheck) { printf("After adding explicit null check:\n"); gtDispTree(tree); } } #endif } else /* This is a static data member */ { if (tree->gtFlags & GTF_IND_TLS_REF) { // Thread Local Storage static field reference // // Field ref is a TLS 'Thread-Local-Storage' reference // // Build this tree: IND(*) # // | // ADD(I_IMPL) // / \. // / CNS(fldOffset) // / // / // / // IND(I_IMPL) == [Base of this DLL's TLS] // | // ADD(I_IMPL) // / \. // / CNS(IdValue*4) or MUL // / / \. // IND(I_IMPL) / CNS(4) // | / // CNS(TLS_HDL,0x2C) IND // | // CNS(pIdAddr) // // # Denotes the original node // void** pIdAddr = nullptr; unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr); // // If we can access the TLS DLL index ID value directly // then pIdAddr will be NULL and // IdValue will be the actual TLS DLL index ID // GenTree* dllRef = nullptr; if (pIdAddr == nullptr) { if (IdValue != 0) { dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL); } } else { dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true); // Next we multiply by 4 dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL)); } #define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides // Mark this ICON as a TLS_HDL, codegen will use FS:[cns] GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0) { tree->gtFlags &= ~GTF_FLD_INITCLASS; tlsRef->gtFlags |= GTF_ICON_INITCLASS; } tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (dllRef != nullptr) { /* Add the dllRef */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef); } /* indirect to have tlsRef point at the base of the DLL's Thread Local Storage */ tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef); if (fldOffset != 0) { FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq); /* Add the TLS static field offset to the address */ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode); } // Final indirect to get to the actual value of TLS static field tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = tlsRef; noway_assert(tree->gtFlags & GTF_IND_TLS_REF); } else { assert(!fieldMayOverlap); // Normal static field reference // // If we can access the static's address directly // then pFldAddr will be NULL and // fldAddr will be the actual address of the static field // void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr); // We should always be able to access this static field address directly // assert(pFldAddr == nullptr); // For boxed statics, this direct address will be for the box. We have already added // the indirection for the field itself and attached the sequence, in importation. bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd); FieldSeqNode* fldSeq = !isBoxedStatic ? GetFieldSeqStore()->CreateSingleton(symHnd) : FieldSeqStore::NotAField(); // TODO-CQ: enable this optimization for 32 bit targets.
bool isStaticReadOnlyInited = false; #ifdef TARGET_64BIT if (tree->TypeIs(TYP_REF) && !isBoxedStatic) { bool pIsSpeculative = true; if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE) { isStaticReadOnlyInited = !pIsSpeculative; } } #endif // TARGET_64BIT // TODO: choices made below have mostly historical reasons and // should be unified to always use the IND(<address>) form. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT bool preferIndir = isBoxedStatic || isStaticReadOnlyInited || (IMAGE_REL_BASED_REL32 != eeGetRelocTypeHint(fldAddr)); #else // !TARGET_64BIT bool preferIndir = isBoxedStatic; #endif // !TARGET_64BIT if (preferIndir) { GenTreeFlags handleKind = GTF_EMPTY; if (isBoxedStatic) { handleKind = GTF_ICON_STATIC_BOX_PTR; } else if (isStaticReadOnlyInited) { handleKind = GTF_ICON_CONST_PTR; } else { handleKind = GTF_ICON_STATIC_HDL; } GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fldSeq); // Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to. if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited) { tree->gtFlags &= ~GTF_FLD_INITCLASS; addr->gtFlags |= GTF_ICON_INITCLASS; } tree->SetOper(GT_IND); tree->AsOp()->gtOp1 = addr; if (isBoxedStatic) { // The box for the static cannot be null, and is logically invariant, since it // represents (a base for) the static's address. tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } else if (isStaticReadOnlyInited) { JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd)); // Static readonly field is not null at this point (see getStaticFieldCurrentClass impl). tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); } return fgMorphSmpOp(tree); } else { // Only volatile or classinit could be set, and they map over noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0); static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE); static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS); tree->SetOper(GT_CLS_VAR); tree->AsClsVar()->gtClsVarHnd = symHnd; tree->AsClsVar()->gtFieldSeq = fldSeq; } return tree; } } noway_assert(tree->gtOper == GT_IND); if (fldOffset == 0) { GenTree* addr = tree->AsOp()->gtOp1; // 'addr' may be a GT_COMMA. Skip over any comma nodes addr = addr->gtEffectiveVal(); #ifdef DEBUG if (verbose) { printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n"); gtDispTree(tree); } #endif // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node. FieldSeqNode* fieldSeq = fieldMayOverlap ? FieldSeqStore::NotAField() : GetFieldSeqStore()->CreateSingleton(symHnd); fgAddFieldSeqForZeroOffset(addr, fieldSeq); } // Pass down the current mac; if non null we are computing an address GenTree* result = fgMorphSmpOp(tree, mac); #ifdef DEBUG if (verbose) { printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n"); gtDispTree(result); } #endif return result; } //------------------------------------------------------------------------------ // fgMorphCallInline: attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // inlineResult - result tracking and reporting // // Notes: // Attempts to inline the call. 
// // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult) { bool inliningFailed = false; // Is this call an inline candidate? if (call->IsInlineCandidate()) { InlineContext* createdContext = nullptr; // Attempt the inline fgMorphCallInlineHelper(call, inlineResult, &createdContext); // We should have made up our minds one way or another.... assert(inlineResult->IsDecided()); // If we failed to inline, we have a bit of work to do to cleanup if (inlineResult->IsFailure()) { if (createdContext != nullptr) { // We created a context before we got to the failure, so mark // it as failed in the tree. createdContext->SetFailed(inlineResult); } else { #ifdef DEBUG // In debug we always put all inline attempts into the inline tree. InlineContext* ctx = m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call); ctx->SetFailed(inlineResult); #endif } inliningFailed = true; // Clear the Inline Candidate flag so we can ensure later we tried // inlining all candidates. // call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE; } } else { // This wasn't an inline candidate. So it must be a GDV candidate. assert(call->IsGuardedDevirtualizationCandidate()); // We already know we can't inline this call, so don't even bother to try. inliningFailed = true; } // If we failed to inline (or didn't even try), do some cleanup. if (inliningFailed) { if (call->gtReturnType != TYP_VOID) { JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID()); // Detach the GT_CALL tree from the original statement by // hanging a "nothing" node to it. Later the "nothing" node will be removed // and the original GT_CALL tree will be picked up by the GT_RET_EXPR node. noway_assert(fgMorphStmt->GetRootNode() == call); fgMorphStmt->SetRootNode(gtNewNothingNode()); } } } //------------------------------------------------------------------------------ // fgMorphCallInlineHelper: Helper to attempt to inline a call // // Arguments: // call - call expression to inline, inline candidate // result - result to set to success or failure // createdContext - The context that was created if the inline attempt got to the inliner. // // Notes: // Attempts to inline the call. // // If successful, callee's IR is inserted in place of the call, and // is marked with an InlineContext. // // If unsuccessful, the transformations done in anticipation of a // possible inline are undone, and the candidate flag on the call // is cleared. // // If a context was created because we got to the importer then it is output by this function. // If the inline succeeded, this context will already be marked as successful. If it failed and // a context is returned, then it will not have been marked as success or failed. void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext) { // Don't expect any surprises here. assert(result->IsCandidate()); if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING) { // For now, attributing this to call site, though it's really // more of a budget issue (lvaCount currently includes all // caller and prospective callee locals). We still might be // able to inline other callees into this caller, or inline // this callee in other callers. 
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } if (call->IsVirtual()) { result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL); return; } // Re-check this because guarded devirtualization may allow these through. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } // impMarkInlineCandidate() is expected not to mark tail prefixed calls // and recursive tail calls as inline candidates. noway_assert(!call->IsTailPrefixedCall()); noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call)); // // Calling inlinee's compiler to inline the method. // unsigned startVars = lvaCount; #ifdef DEBUG if (verbose) { printf("Expanding INLINE_CANDIDATE in statement "); printStmtID(fgMorphStmt); printf(" in " FMT_BB ":\n", compCurBB->bbNum); gtDispStmt(fgMorphStmt); if (call->IsImplicitTailCall()) { printf("Note: candidate is implicit tail call\n"); } } #endif impInlineRoot()->m_inlineStrategy->NoteAttempt(result); // // Invoke the compiler to inline the call. // fgInvokeInlineeCompiler(call, result, createdContext); if (result->IsFailure()) { // Undo some changes made in anticipation of inlining... // Zero out the used locals memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable)); for (unsigned i = startVars; i < lvaCount; i++) { new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor. } lvaCount = startVars; #ifdef DEBUG if (verbose) { // printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount); } #endif return; } #ifdef DEBUG if (verbose) { // printf("After inlining lvaCount=%d.\n", lvaCount); } #endif } //------------------------------------------------------------------------ // fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp. // // Arguments: // callee - The callee to check // failReason - If this method returns false, the reason why. Can be nullptr. // // Return Value: // Returns true or false based on whether the callee can be fastTailCalled // // Notes: // This function is target specific and each target will make the fastTailCall // decision differently. See the notes below. // // This function calls fgInitArgInfo() to initialize the arg info table, which // is used to analyze the argument. This function can alter the call arguments // by adding argument IR nodes for non-standard arguments. // // Windows Amd64: // A fast tail call can be made whenever the number of callee arguments // is less than or equal to the number of caller arguments, or we have four // or fewer callee arguments. This is because, on Windows AMD64, each // argument uses exactly one register or one 8-byte stack slot. Thus, we only // need to count arguments, and not be concerned with the size of each // incoming or outgoing argument. 
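// As a rough model of the size check performed below (an illustrative
// simplification; the real code also rounds and aligns sizes and applies
// the target-specific vetoes described in this function):
//
//   canFastTailCall ~= (alignedCalleeArgStackSize <= callerArgStackSize)
//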
// // Can fast tail call examples (amd64 Windows): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal to or less than the caller -- // caller(struct, struct, struct, struct, struct, struct) // callee(int, int, int, int, int, int) // // -- Callee requires stack space that is less than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int) // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Windows): // // -- Callee requires stack space that is larger than the caller -- // caller(struct, double, struct, float, struct, struct) // callee(int, int, int, int, int, double, double, double) // // -- Callee has a byref struct argument -- // caller(int, int, int) // callee(struct(size 3 bytes)) // // Unix Amd64 && Arm64: // A fastTailCall decision can be made whenever the callee's stack space is // less than or equal to the caller's stack space. There are many permutations // of when the caller and callee have different stack sizes if there are // structs being passed to either the caller or callee. // // Exceptions: // If the callee has a 9 to 16 byte struct argument and the callee has // stack arguments, the decision will be to not fast tail call. This is // because before fgMorphArgs is done, it is unknown whether the struct // will be placed on the stack or enregistered. Therefore, the conservative // decision not to fast tail call is taken. This limitation should be // removed if/when fgMorphArgs no longer depends on fgCanFastTailCall. // // Can fast tail call examples (amd64 Unix): // // -- Callee will have all register arguments -- // caller(int, int, int, int) // callee(int, int, float, int) // // -- Callee requires stack space that is equal to the caller -- // caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte // stack // space // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee requires stack space that is less than the caller -- // caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte // stack // space // callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space // // -- Callee will have all register arguments -- // caller(int) // callee(int, int, int, int) // // Cannot fast tail call examples (amd64 Unix): // // -- Callee requires stack space that is larger than the caller -- // caller(float, float, float, float, float, float, float, float) -- 8 float register arguments // callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space // // -- Callee has structs which cannot be enregistered (Implementation Limitation) -- // caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register // arguments, 24 byte stack space // callee({ double, double, double }) -- 24 bytes stack space // // -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) -- // caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space // callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space // // -- Caller requires stack 
space and nCalleeArgs > nCallerArgs (Bug) -- // caller({ double, double, double, double, double, double }) // 48 byte stack // callee(int, int) -- 2 int registers bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason) { #if FEATURE_FASTTAILCALL // To reach here means that the return types of the caller and callee are tail call compatible. // In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (callee->IsTailPrefixedCall()) { var_types retType = info.compRetType; assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv, (var_types)callee->gtReturnType, callee->gtRetClsHnd, callee->GetUnmanagedCallConv())); } #endif assert(!callee->AreArgsComplete()); fgInitArgInfo(callee); fgArgInfo* argInfo = callee->fgArgInfo; unsigned calleeArgStackSize = 0; unsigned callerArgStackSize = info.compArgStackSize; auto reportFastTailCallDecision = [&](const char* thisFailReason) { if (failReason != nullptr) { *failReason = thisFailReason; } #ifdef DEBUG if ((JitConfig.JitReportFastTailCallDecisions()) == 1) { if (callee->gtCallType != CT_INDIRECT) { const char* methodName; methodName = eeGetMethodFullName(callee->gtCallMethHnd); printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ", info.compFullName, methodName); } else { printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- " "Decision: ", info.compFullName); } if (thisFailReason == nullptr) { printf("Will fast tailcall"); } else { printf("Will not fast tailcall (%s)", thisFailReason); } printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize); } else { if (thisFailReason == nullptr) { JITDUMP("[Fast tailcall decision]: Will fast tailcall\n"); } else { JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason); } } #endif // DEBUG }; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment()); calleeArgStackSize += arg->GetStackByteSize(); #ifdef TARGET_ARM if (arg->IsSplit()) { reportFastTailCallDecision("Split argument in callee is not supported on ARM32"); return false; } #endif // TARGET_ARM } calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize); #ifdef TARGET_ARM if (compHasSplitParam) { reportFastTailCallDecision("Split argument in caller is not supported on ARM32"); return false; } if (compIsProfilerHookNeeded()) { reportFastTailCallDecision("Profiler is not supported on ARM32"); return false; } // On ARM32 we have only one non-parameter volatile register and we need it // for the GS security cookie check. We could technically still tailcall // when the callee does not use all argument registers, but we keep the // code simple here.
if (getNeedsGSSecurityCookie()) { reportFastTailCallDecision("Not enough registers available due to the GS security cookie check"); return false; } #endif if (!opts.compFastTailCalls) { reportFastTailCallDecision("Configuration doesn't allow fast tail calls"); return false; } if (callee->IsStressTailCall()) { reportFastTailCallDecision("Fast tail calls are not performed under tail call stress"); return false; } #ifdef TARGET_ARM if (callee->IsR2RRelativeIndir() || callee->HasNonStandardAddedArgs(this)) { reportFastTailCallDecision( "Method with non-standard args passed in callee saved register cannot be tail called"); return false; } #endif // Note on vararg methods: // If the caller is a vararg method, we don't know the number of arguments passed by caller's caller. // But we can be sure that the in-coming arg area of a vararg caller would be sufficient to hold its // fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as the // out-going area required for the callee is bounded by the caller's fixed argument space. // // Note that the callee being a vararg method is not a problem since we can account for the params being passed. // // We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg // method. This is due to the ABI differences for native vararg methods for these platforms. There is // work required to shuffle arguments to the correct locations. CLANG_FORMAT_COMMENT_ANCHOR; if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs())) { reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64"); return false; } if (compLocallocUsed) { reportFastTailCallDecision("Localloc used"); return false; } #ifdef TARGET_AMD64 // Needed for Jit64 compat. // In the future, enabling fast tail calls from methods that need GS cookie // check would require codegen side work to emit GS cookie check before a // tail call. if (getNeedsGSSecurityCookie()) { reportFastTailCallDecision("GS Security cookie check required"); return false; } #endif // If the NextCallReturnAddress intrinsic is used we should do normal calls. if (info.compHasNextCallRetAddr) { reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic"); return false; } if (callee->HasRetBufArg()) // RetBuf { // If callee has RetBuf param, caller too must have it. // Otherwise go the slow route. if (info.compRetBuffArg == BAD_VAR_NUM) { reportFastTailCallDecision("Callee has RetBuf but caller does not."); return false; } } // For a fast tail call the caller will use its incoming arg stack space to place // arguments, so if the callee requires more arg stack space than is available here // the fast tail call cannot be performed. This is common to all platforms. // Note that the GC'ness of on stack args need not match since the arg setup area is marked // as non-interruptible for fast tail calls. if (calleeArgStackSize > callerArgStackSize) { reportFastTailCallDecision("Not enough incoming arg space"); return false; } // For Windows some struct parameters are copied on the local frame // and then passed by reference. We cannot fast tail call in these situations // as we need to keep our frame around.
if (fgCallHasMustCopyByrefParameter(callee)) { reportFastTailCallDecision("Callee has a byref parameter"); return false; } reportFastTailCallDecision(nullptr); return true; #else // FEATURE_FASTTAILCALL if (failReason) *failReason = "Fast tailcalls are not supported on this platform"; return false; #endif } //------------------------------------------------------------------------ // fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that // requires a struct copy in the caller. // // Arguments: // callee - The callee to check // // Return Value: // Returns true or false based on whether this call has a byref parameter that // requires a struct copy in the caller. #if FEATURE_FASTTAILCALL bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee) { fgArgInfo* argInfo = callee->fgArgInfo; bool hasMustCopyByrefParameter = false; for (unsigned index = 0; index < argInfo->ArgCount(); ++index) { fgArgTabEntry* arg = argInfo->GetArgEntry(index, false); if (arg->isStruct) { if (arg->passedByRef) { // Generally a byref arg will block tail calling, as we have to // make a local copy of the struct for the callee. hasMustCopyByrefParameter = true; // If we're optimizing, we may be able to pass our caller's byref to our callee, // and so still be able to avoid a struct copy. if (opts.OptimizationEnabled()) { // First, see if this arg is an implicit byref param. GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this); if (lcl != nullptr) { // Yes, the arg is an implicit byref param. const unsigned lclNum = lcl->GetLclNum(); LclVarDsc* const varDsc = lvaGetDesc(lcl); // The param must not be promoted; if we've promoted, then the arg will be // a local struct assembled from the promoted fields. if (varDsc->lvPromoted) { JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n", dspTreeID(arg->GetNode()), lclNum); } else { JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n", dspTreeID(arg->GetNode()), lclNum); // We have to worry about introducing aliases if we bypass copying // the struct at the call. We'll do some limited analysis to see if we // can rule this out. const unsigned argLimit = 6; // If this is the only appearance of the byref in the method, then // aliasing is not possible. // // If no other call arg refers to this byref, and no other arg is // a pointer which could refer to this byref, we can optimize. // // We only check this for calls with small numbers of arguments, // as the analysis cost will be quadratic. // const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); if (totalAppearances == 1) { JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum); hasMustCopyByrefParameter = false; } else if (totalAppearances > callAppearances) { // lvRefCntWtd tracks the number of appearances of the arg at call sites. // If this number doesn't match the regular ref count, there is // a non-call appearance, and we must be conservative. // JITDUMP("... no, arg has %u non-call appearance(s)\n", totalAppearances - callAppearances); } else if (argInfo->ArgCount() <= argLimit) { JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n" "... 
Running alias analysis on this call's args\n", totalAppearances); GenTree* interferingArg = nullptr; for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2) { if (index2 == index) { continue; } fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false); JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode())); DISPTREE(arg2->GetNode()); // Do we pass 'lcl' more than once to the callee? if (arg2->isStruct && arg2->passedByRef) { GenTreeLclVarCommon* const lcl2 = arg2->GetNode()->IsImplicitByrefParameterValue(this); if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum())) { // not copying would introduce aliased implicit byref structs // in the callee ... we can't optimize. interferingArg = arg2->GetNode(); break; } else { JITDUMP("... arg refers to different implicit byref V%02u\n", lcl2->GetLclNum()); continue; } } // Do we pass a byref pointer which might point within 'lcl'? // // We can assume the 'lcl' is unaliased on entry to the // method, so the only way we can have an aliasing byref pointer at // the call is if 'lcl' is address taken/exposed in the method. // // Note even though 'lcl' is not promoted, we are in the middle // of the promote->rewrite->undo->(morph)->demote cycle, and so // might see references to promoted fields of 'lcl' that haven't yet // been demoted (see fgMarkDemotedImplicitByRefArgs). // // So, we also need to scan all 'lcl's fields, if any, to see if they // are exposed. // // When looking for aliases from other args, we check for both TYP_BYREF // and TYP_I_IMPL typed args here. Conceptually anything that points into // an implicit byref parameter should be TYP_BYREF, as these parameters could // refer to boxed heap locations (say if the method is invoked by reflection) // but there are some stack only structs (like typed references) where // the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will // transiently retype all simple address-of implicit parameter args as // TYP_I_IMPL. // if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL)) { JITDUMP("...arg is a byref, must run an alias check\n"); bool checkExposure = true; bool hasExposure = false; // See if there is any way arg could refer to a parameter struct. GenTree* arg2Node = arg2->GetNode(); if (arg2Node->OperIs(GT_LCL_VAR)) { GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon(); assert(arg2LclNode->GetLclNum() != lclNum); LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode); // Other params can't alias implicit byref params if (arg2Dsc->lvIsParam) { checkExposure = false; } } // Because we're checking TYP_I_IMPL above, at least // screen out obvious things that can't cause aliases. else if (arg2Node->IsIntegralConst()) { checkExposure = false; } if (checkExposure) { JITDUMP( "... not sure where byref arg points, checking if V%02u is exposed\n", lclNum); // arg2 might alias arg, see if we've exposed // arg somewhere in the method. if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed()) { // Struct as a whole is exposed, can't optimize JITDUMP("... V%02u is exposed\n", lclNum); hasExposure = true; } else if (varDsc->lvFieldLclStart != 0) { // This is the promoted/undone struct case. // // The field start is actually the local number of the promoted local, // use it to enumerate the fields. 
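// (Illustrative note on the layout assumed here: for an undone promotion,
// lvFieldLclStart of the original implicit-byref param V_p names the
// promoted shadow struct local V_s, and V_s's own
// lvFieldLclStart .. lvFieldLclStart + lvFieldCnt - 1 range names the
// per-field locals that the loop below scans for exposure. V_p and V_s are
// hypothetical labels for this comment only.)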
const unsigned promotedLcl = varDsc->lvFieldLclStart; LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl); JITDUMP("...promoted-unpromoted case -- also checking exposure of " "fields of V%02u\n", promotedLcl); for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt; fieldIndex++) { LclVarDsc* fieldDsc = lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex); if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed()) { // Promoted and not yet demoted field is exposed, can't optimize JITDUMP("... field V%02u is exposed\n", promotedVarDsc->lvFieldLclStart + fieldIndex); hasExposure = true; break; } } } } if (hasExposure) { interferingArg = arg2->GetNode(); break; } } else { JITDUMP("...arg is not a byref or implicit byref (%s)\n", varTypeName(arg2->GetNode()->TypeGet())); } } if (interferingArg != nullptr) { JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg), lclNum); } else { JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum); hasMustCopyByrefParameter = false; } } else { JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n", argInfo->ArgCount(), argLimit); } } } } if (hasMustCopyByrefParameter) { // This arg requires a struct copy. No reason to keep scanning the remaining args. break; } } } } return hasMustCopyByrefParameter; } #endif //------------------------------------------------------------------------ // fgMorphPotentialTailCall: Attempt to morph a call that the importer has // identified as a potential tailcall to an actual tailcall and return the // placeholder node to use in this case. // // Arguments: // call - The call to morph. // // Return Value: // Returns a node to use if the call was morphed into a tailcall. If this // function returns a node the call is done being morphed and the new node // should be used. Otherwise the call will have been demoted to a regular call // and should go through normal morph. // // Notes: // This is called only for calls that the importer has already identified as // potential tailcalls. It will do profitability and legality checks and // classify which kind of tailcall we are able to (or should) do, along with // modifying the trees to perform that kind of tailcall. // GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // It should either be an explicit (i.e. tail prefixed) or an implicit tail call assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall()); // It cannot be an inline candidate assert(!call->IsInlineCandidate()); auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) { #ifdef DEBUG if (verbose) { printf("\nRejecting tail call in morph for call "); printTreeID(call); printf(": %s", reason); if (lclNum != BAD_VAR_NUM) { printf(" V%02u", lclNum); } printf("\n"); } #endif // for non user funcs, we have no handles to report info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), TAILCALL_FAIL, reason); // We have checked the candidate so demote. 
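// ("Demote" here means stripping the explicit/implicit tailcall flags below
// so the call proceeds down the ordinary call path; summary comment only.)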
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif }; if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { failTailCall("Might turn into an intrinsic"); return nullptr; } #ifdef TARGET_ARM if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV) { failTailCall("Non-standard calling convention"); return nullptr; } #endif if (call->IsNoReturn() && !call->IsTailPrefixedCall()) { // Such tail calls always throw an exception and we won't be able to see current // Caller() in the stacktrace. failTailCall("Never returns"); return nullptr; } #ifdef DEBUG if (opts.compGcChecks && (info.compRetType == TYP_REF)) { failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, " "invalidating tailcall opportunity"); return nullptr; } #endif // We have to ensure to pass the incoming retValBuf as the // outgoing one. Using a temp will not do as this function will // not regain control to do the copy. This can happen when inlining // a tailcall which also has a potential tailcall in it: the IL looks // like we can do a tailcall, but the trees generated use a temp for the inlinee's // result. TODO-CQ: Fix this. if (info.compRetBuffArg != BAD_VAR_NUM) { noway_assert(call->TypeGet() == TYP_VOID); GenTree* retValBuf = call->gtCallArgs->GetNode(); if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg) { failTailCall("Need to copy return buffer"); return nullptr; } } // We are still not sure whether it can be a tail call. Because, when converting // a call to an implicit tail call, we must check that there are no locals with // their address taken. If this is the case, we have to assume that the address // has been leaked and the current stack frame must live until after the final // call. // Verify that none of vars has lvHasLdAddrOp or IsAddressExposed() bit set. Note // that lvHasLdAddrOp is much more conservative. We cannot just base it on // IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs // during morph stage. The reason for also checking IsAddressExposed() is that in case // of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp. // The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us // never to be incorrect. // // TODO-Throughput: have a compiler level flag to indicate whether method has vars whose // address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed() // is set. This avoids the need for iterating through all lcl vars of the current // method. Right now throughout the code base we are not consistently using 'set' // method to set lvHasLdAddrOp and IsAddressExposed() flags. bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall(); if (isImplicitOrStressTailCall && compLocallocUsed) { failTailCall("Localloc used"); return nullptr; } bool hasStructParam = false; for (unsigned varNum = 0; varNum < lvaCount; varNum++) { LclVarDsc* varDsc = lvaGetDesc(varNum); // If the method is marked as an explicit tail call we will skip the // following three hazard checks. // We still must check for any struct parameters and set 'hasStructParam' // so that we won't transform the recursive tail call into a loop. 
// if (isImplicitOrStressTailCall) { if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Local address taken", varNum); return nullptr; } if (varDsc->IsAddressExposed()) { if (lvaIsImplicitByRefLocal(varNum)) { // The address of the implicit-byref is a non-address use of the pointer parameter. } else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl)) { // The address of the implicit-byref's field is likewise a non-address use of the pointer // parameter. } else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum)) { // This temp was used for struct promotion bookkeeping. It will not be used, and will have // its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs. assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl)); assert(fgGlobalMorph); } else { failTailCall("Local address taken", varNum); return nullptr; } } if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum)) { failTailCall("Has Struct Promoted Param", varNum); return nullptr; } if (varDsc->lvPinned) { // A tail call removes the method from the stack, which means the pinning // goes away for the callee. We can't allow that. failTailCall("Has Pinned Vars", varNum); return nullptr; } } if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam) { hasStructParam = true; // This prevents transforming a recursive tail call into a loop // but doesn't prevent tail call optimization so we need to // look at the rest of parameters. } } if (!fgCheckStmtAfterTailCall()) { failTailCall("Unexpected statements after the tail call"); return nullptr; } const char* failReason = nullptr; bool canFastTailCall = fgCanFastTailCall(call, &failReason); CORINFO_TAILCALL_HELPERS tailCallHelpers; bool tailCallViaJitHelper = false; if (!canFastTailCall) { if (call->IsImplicitTailCall()) { // Implicit or opportunistic tail calls are always dispatched via fast tail call // mechanism and never via tail call helper for perf. failTailCall(failReason); return nullptr; } assert(call->IsTailPrefixedCall()); assert(call->tailCallInfo != nullptr); // We do not currently handle non-standard args except for VSD stubs. if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this)) { failTailCall( "Method with non-standard args passed in callee trash register cannot be tail called via helper"); return nullptr; } // On x86 we have a faster mechanism than the general one which we use // in almost all cases. See fgCanTailCallViaJitHelper for more information. if (fgCanTailCallViaJitHelper()) { tailCallViaJitHelper = true; } else { // Make sure we can get the helpers. We do this last as the runtime // will likely be required to generate these. CORINFO_RESOLVED_TOKEN* token = nullptr; CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig(); unsigned flags = 0; if (!call->tailCallInfo->IsCalli()) { token = call->tailCallInfo->GetToken(); if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_TAILCALL_IS_CALLVIRT; } } if (call->gtCallThisArg != nullptr) { var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet(); if (thisArgType != TYP_REF) { flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF; } } if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags, &tailCallHelpers)) { failTailCall("Tail call help not available"); return nullptr; } } } // Check if we can make the tailcall a loop. 
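// (A recursive fast tail call can instead be rewritten as a loop; an
// illustrative shape of that rewrite, not actual codegen:
//
//   method:  <prolog>
//   loopTop: ...body...
//            <store the new argument values back into the param locals>
//            goto loopTop;  // instead of: tail call method
//
// subject to the restrictions checked below.)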
bool fastTailCallToLoop = false; #if FEATURE_TAILCALL_OPT // TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register // or return type is a struct that can be passed in a register. // // TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through // hidden generic context param or through keep alive thisptr), then transforming a recursive // call to such a method requires that the generic context stored on the stack slot be updated. Right now, // fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming // a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the // generic type parameters of both caller and callee generic method are the same. if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() && !lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet())) { fastTailCallToLoop = true; } #endif // Ok -- now we are committed to performing a tailcall. Report the decision. CorInfoTailCall tailCallResult; if (fastTailCallToLoop) { tailCallResult = TAILCALL_RECURSIVE; } else if (canFastTailCall) { tailCallResult = TAILCALL_OPTIMIZED; } else { tailCallResult = TAILCALL_HELPER; } info.compCompHnd->reportTailCallDecision(nullptr, (call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr, call->IsTailPrefixedCall(), tailCallResult, nullptr); // Are we currently planning to expand the gtControlExpr as an early virtual call target? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // It isn't always profitable to expand a virtual call early // // We always expand the TAILCALL_HELPER type late. // And we expand late when we have an optimized tail call // and the this pointer needs to be evaluated into a temp. // if (tailCallResult == TAILCALL_HELPER) { // We will always expand this late in lower instead. // (see LowerTailCallViaJitHelper as it needs some work // for us to be able to expand this earlier in morph) // call->ClearExpandedEarly(); } else if ((tailCallResult == TAILCALL_OPTIMIZED) && ((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0)) { // We generate better code when we expand this late in lower instead. // call->ClearExpandedEarly(); } } // Now actually morph the call. compTailCallUsed = true; // This will prevent inlining this call. call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL; if (tailCallViaJitHelper) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER; } #if FEATURE_TAILCALL_OPT if (fastTailCallToLoop) { call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP; } #endif // Mark that this is no longer a pending tailcall. We need to do this before // we call fgMorphCall again (which happens in the fast tailcall case) to // avoid recursing back into this method. call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; #if FEATURE_TAILCALL_OPT call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; #endif #ifdef DEBUG if (verbose) { printf("\nGTF_CALL_M_TAILCALL bit set for call "); printTreeID(call); printf("\n"); if (fastTailCallToLoop) { printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call "); printTreeID(call); printf("\n"); } } #endif // For R2R we might need a different entry point for this call if we are doing a tailcall.
// The reason is that the normal delay load helper uses the return address to find the indirection // cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM: // We optimize delegate invocations manually in the JIT so skip this for those. if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke()) { info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint); #ifdef TARGET_XARCH // We have already computed arg info to make the fast tailcall decision, but on X64 we now // have to pass the indirection cell, so redo arg info. call->ResetArgInfo(); #endif } // If this block has a flow successor, make suitable updates. // BasicBlock* const nextBlock = compCurBB->GetUniqueSucc(); if (nextBlock == nullptr) { // No unique successor. compCurBB should be a return. // assert(compCurBB->bbJumpKind == BBJ_RETURN); } else { // Flow no longer reaches nextBlock from here. // fgRemoveRefPred(nextBlock, compCurBB); // Adjust profile weights. // // Note if this is a tail call to loop, further updates // are needed once we install the loop edge. // if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight()) { // Since we have linear flow we can update the next block weight. // weight_t const blockWeight = compCurBB->bbWeight; weight_t const nextWeight = nextBlock->bbWeight; weight_t const newNextWeight = nextWeight - blockWeight; // If the math would result in a negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextWeight >= 0) { // Note if we'd already morphed the IR in nextblock we might // have done something profile sensitive that we should arguably reconsider. // JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum, nextWeight, newNextWeight); nextBlock->setBBProfileWeight(newNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight); } // If nextBlock is not a BBJ_RETURN, it should have a unique successor that // is a BBJ_RETURN, as we allow a little bit of flow after a tail call. // if (nextBlock->bbJumpKind != BBJ_RETURN) { BasicBlock* retBlock = nextBlock->GetUniqueSucc(); // Check if we have a sequence of GT_ASG blocks where the same variable is assigned // to temp locals over and over. // Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs. // // { GT_ASG(t_0, GT_CALL(...)) } // { GT_ASG(t_1, t0) } (with casts on rhs potentially) // ... 
// { GT_ASG(t_n, t_(n - 1)) } // { GT_RET t_n } // if (retBlock->bbJumpKind != BBJ_RETURN) { // Make sure the block has a single statement assert(nextBlock->firstStmt() == nextBlock->lastStmt()); // And the root node is "ASG(LCL_VAR, LCL_VAR)" GenTree* asgNode = nextBlock->firstStmt()->GetRootNode(); assert(asgNode->OperIs(GT_ASG)); unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); while (retBlock->bbJumpKind != BBJ_RETURN) { #ifdef DEBUG Statement* nonEmptyStmt = nullptr; for (Statement* const stmt : retBlock->Statements()) { // Ignore NOP statements if (!stmt->GetRootNode()->OperIs(GT_NOP)) { // Only a single non-NOP statement is allowed assert(nonEmptyStmt == nullptr); nonEmptyStmt = stmt; } } if (nonEmptyStmt != nullptr) { asgNode = nonEmptyStmt->GetRootNode(); if (!asgNode->OperIs(GT_NOP)) { assert(asgNode->OperIs(GT_ASG)); GenTree* rhs = asgNode->gtGetOp2(); while (rhs->OperIs(GT_CAST)) { assert(!rhs->gtOverflow()); rhs = rhs->gtGetOp1(); } assert(lcl == rhs->AsLclVarCommon()->GetLclNum()); lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum(); } } #endif retBlock = retBlock->GetUniqueSucc(); } } assert(retBlock->bbJumpKind == BBJ_RETURN); if (retBlock->hasProfileWeight()) { // Do similar updates here. // weight_t const nextNextWeight = retBlock->bbWeight; weight_t const newNextNextWeight = nextNextWeight - blockWeight; // If the math would result in a negative weight then there's // no local repair we can do; just leave things inconsistent. // if (newNextNextWeight >= 0) { JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", retBlock->bbNum, nextNextWeight, newNextNextWeight); retBlock->setBBProfileWeight(newNextNextWeight); } else { JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n", retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight); } } } } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // We enable shared-ret tail call optimization for recursive calls even if // FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined. if (gtIsRecursiveCall(call)) #endif { // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. compCurBB->bbJumpKind = BBJ_RETURN; } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); #ifdef DEBUG // Tail call needs to be in one of the following IR forms // Either a call stmt or // GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..))) // var = GT_CALL(..) or var = (GT_CAST(GT_CALL(..))) // GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP) // In the above, // GT_CASTS may be nested. genTreeOps stmtOper = stmtExpr->gtOper; if (stmtOper == GT_CALL) { assert(stmtExpr == call); } else { assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA); GenTree* treeWithCall; if (stmtOper == GT_RETURN) { treeWithCall = stmtExpr->gtGetOp1(); } else if (stmtOper == GT_COMMA) { // Second operation must be nop. assert(stmtExpr->gtGetOp2()->IsNothingNode()); treeWithCall = stmtExpr->gtGetOp1(); } else { treeWithCall = stmtExpr->gtGetOp2(); } // Peel off casts while (treeWithCall->gtOper == GT_CAST) { assert(!treeWithCall->gtOverflow()); treeWithCall = treeWithCall->gtGetOp1(); } assert(treeWithCall == call); } #endif // Store the call type for later to introduce the correct placeholder.
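// (Illustrative note: when the statement root was replaced by the bare call,
// morph later hands back a zero constant whose type is derived from
// origCallType, so post-order morphing of the discarded parent nodes stays
// well formed; see the isRootReplaced handling below.)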
var_types origCallType = call->TypeGet(); GenTree* result; if (!canFastTailCall && !tailCallViaJitHelper) { // For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular // calls with (to the JIT) regular control flow so we do not need to do // much special handling. result = fgMorphTailCallViaHelpers(call, tailCallHelpers); } else { // Otherwise we will transform into something that does not return. For // fast tailcalls a "jump" and for tailcall via JIT helper a call to a // JIT helper that does not return. So peel off everything after the // call. Statement* nextMorphStmt = fgMorphStmt->GetNextStmt(); JITDUMP("Remove all stmts after the call.\n"); while (nextMorphStmt != nullptr) { Statement* stmtToRemove = nextMorphStmt; nextMorphStmt = stmtToRemove->GetNextStmt(); fgRemoveStmt(compCurBB, stmtToRemove); } bool isRootReplaced = false; GenTree* root = fgMorphStmt->GetRootNode(); if (root != call) { JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call)); isRootReplaced = true; fgMorphStmt->SetRootNode(call); } // Avoid potential extra work for the return (for example, vzeroupper) call->gtType = TYP_VOID; // The runtime requires that we perform a null check on the `this` argument before // tail calling to a virtual dispatch stub. This requirement is a consequence of limitations // in the runtime's ability to map an AV to a NullReferenceException if // the AV occurs in a dispatch stub that has unmanaged caller. if (call->IsVirtualStub()) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Do some target-specific transformations (before we process the args, // etc.) for the JIT helper case. if (tailCallViaJitHelper) { fgMorphTailCallViaJitHelper(call); // Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the // argument list, invalidating the argInfo. call->fgArgInfo = nullptr; } // Tail call via JIT helper: The VM can't use return address hijacking // if we're not going to return and the helper doesn't have enough info // to safely poll, so we poll before the tail call, if the block isn't // already safe. Since tail call via helper is a slow mechanism it // doesn't matter whether we emit GC poll. This is done to be in parity // with Jit64. Also this avoids GC info size increase if almost all // methods are expected to be tail calls (e.g. F#). // // Note that we can avoid emitting GC-poll if we know that the current // BB is dominated by a Gc-SafePoint block. But we don't have dominator // info at this point. One option is to just add a place holder node for // GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block // is dominated by a GC-SafePoint. For now it is not clear whether // optimizing slow tail calls is worth the effort. As a low cost check, // we check whether the first and current basic blocks are // GC-SafePoints. // // Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead, // fgSetBlockOrder() is going to mark the method as fully interruptible // if the block containing this tail call is reachable without executing // any call.
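// As a rough model of the condition below (illustrative pseudocode only):
//
//   morphCallNow = canFastTailCall
//               || fgFirstBB is a GC safe point
//               || compCurBB is a GC safe point
//               || inserting an inline GC poll did not split the block;
//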
BasicBlock* curBlock = compCurBB; if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock)) { // We didn't insert a poll block, so we need to morph the call now // (Normally it will get morphed when we get to the split poll block) GenTree* temp = fgMorphCall(call); noway_assert(temp == call); } // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_HAS_JMP flag set. noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; } else { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need an epilogue. compCurBB->bbJumpKind = BBJ_THROW; } if (isRootReplaced) { // We have replaced the root node of this stmt and deleted the rest, // but we still have the deleted, dead nodes on the `fgMorph*` stack // if the root node was an `ASG`, `RET` or `CAST`. // Return a zero con node to exit morphing of the old trees without asserts // and forbid POST_ORDER morphing doing something wrong with our call. var_types callType; if (varTypeIsStruct(origCallType)) { CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd; Compiler::structPassingKind howToReturnStruct; callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference)); if (howToReturnStruct == SPK_ByValue) { callType = TYP_I_IMPL; } else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType)) { callType = TYP_FLOAT; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); } else { callType = origCallType; } assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType)); callType = genActualType(callType); GenTree* zero = gtNewZeroConNode(callType); result = fgMorphTree(zero); } else { result = call; } } return result; } //------------------------------------------------------------------------ // fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code // generation. // // Arguments: // call - The call to transform // helpers - The tailcall helpers provided by the runtime. // // Return Value: // Returns the transformed node. // // Notes: // This transforms // GT_CALL // {callTarget} // {this} // {args} // into // GT_COMMA // GT_CALL StoreArgsStub // {callTarget} (depending on flags provided by the runtime) // {this} (as a regular arg) // {args} // GT_COMMA // GT_CALL Dispatcher // GT_ADDR ReturnAddress // {CallTargetStub} // GT_ADDR ReturnValue // GT_LCL ReturnValue // whenever the call node returns a value. If the call node does not return a // value the last comma will not be there. // GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help) { // R2R requires different handling but we don't support tailcall via // helpers in R2R yet, so just leave it for now. // TODO: R2R: TailCallViaHelper assert(!opts.IsReadyToRun()); JITDUMP("fgMorphTailCallViaHelpers (before):\n"); DISPTREE(call); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come down this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr)); // We might or might not have called fgInitArgInfo before this point: in // builds with FEATURE_FASTTAILCALL we will have called it when checking if // we could do a fast tailcall, so it is possible we have added extra IR // for non-standard args that we must get rid of. Get rid of that IR here // and do this first as it will 'expose' the retbuf as the first arg, which // we rely upon in fgCreateCallDispatcherAndGetResult. call->ResetArgInfo(); GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher); // Change the call to a call to the StoreArgs stub. if (call->HasRetBufArg()) { JITDUMP("Removing retbuf"); call->gtCallArgs = call->gtCallArgs->GetNext(); call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG; } const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0; GenTree* doBeforeStoreArgsStub = nullptr; GenTree* thisPtrStubArg = nullptr; // Put 'this' in normal param list if (call->gtCallThisArg != nullptr) { JITDUMP("Moving this pointer into arg list\n"); GenTree* objp = call->gtCallThisArg->GetNode(); GenTree* thisPtr = nullptr; call->gtCallThisArg = nullptr; // JIT will need one or two copies of "this" in the following cases: // 1) the call needs a null check; // 2) StoreArgs stub needs the target function pointer address and if the call is virtual // the stub also needs "this" in order to evaluate the target. const bool callNeedsNullCheck = call->NeedsNullCheck(); const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual(); // TODO-Review: The following transformation is implemented under the assumption that // both conditions can be true. However, I could not construct such an example // where a virtual tail call would require a null check. If the conditions // are mutually exclusive, the following could be simplified. if (callNeedsNullCheck || stubNeedsThisPtr) { // Clone "this" if "this" has no side effects. if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0) { thisPtr = gtClone(objp, true); } // Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone. if (thisPtr == nullptr) { const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); // tmp = "this" doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp); if (callNeedsNullCheck) { // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet()); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck); } thisPtr = gtNewLclvNode(lclNum, objp->TypeGet()); if (stubNeedsThisPtr) { thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet()); } } else { if (callNeedsNullCheck) { // deref("this") doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB); if (stubNeedsThisPtr) { thisPtrStubArg = gtClone(objp, true); } } else { assert(stubNeedsThisPtr); thisPtrStubArg = objp; } } call->gtFlags &= ~GTF_CALL_NULLCHECK; assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr); } else { thisPtr = objp; } // During rationalization tmp="this" and null check will be materialized // in the right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // We may need to pass the target, for instance for calli or generic methods // where we pass an instantiating stub.
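// (Illustrative summary of the cases handled below: for CT_INDIRECT
// ("calli") the existing gtCallAddr is forwarded as-is; for other direct
// calls the entry point comes from getFunctionEntryPoint; for virtual calls
// the target is resolved via getVirtMethodPointerTree using the saved
// thisPtrStubArg.)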
if (stubNeedsTargetFnPtr) { JITDUMP("Adding target since VM requested it\n"); GenTree* target; if (!call->IsVirtual()) { if (call->gtCallType == CT_INDIRECT) { noway_assert(call->gtCallAddr != nullptr); target = call->gtCallAddr; } else { CORINFO_CONST_LOOKUP addrInfo; info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo); CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE); if (addrInfo.accessType == IAT_VALUE) { handle = addrInfo.handle; } else if (addrInfo.accessType == IAT_PVALUE) { pIndirection = addrInfo.addr; } target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd); } } else { assert(!call->tailCallInfo->GetSig()->hasTypeArg()); CORINFO_CALL_INFO callInfo; unsigned flags = CORINFO_CALLINFO_LDFTN; if (call->tailCallInfo->IsCallvirt()) { flags |= CORINFO_CALLINFO_CALLVIRT; } eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo); target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo); } // Insert target as last arg GenTreeCall::Use** newArgSlot = &call->gtCallArgs; while (*newArgSlot != nullptr) { newArgSlot = &(*newArgSlot)->NextRef(); } *newArgSlot = gtNewCallArgs(target); } // This is now a direct call to the store args stub and not a tailcall. call->gtCallType = CT_USER_FUNC; call->gtCallMethHnd = help.hStoreArgs; call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK; call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV); // The store-args stub returns no value. call->gtRetClsHnd = nullptr; call->gtType = TYP_VOID; call->gtReturnType = TYP_VOID; GenTree* callStoreArgsStub = call; if (doBeforeStoreArgsStub != nullptr) { callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub); } GenTree* finalTree = gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult); finalTree = fgMorphTree(finalTree); JITDUMP("fgMorphTailCallViaHelpers (after):\n"); DISPTREE(finalTree); return finalTree; } //------------------------------------------------------------------------ // fgCreateCallDispatcherAndGetResult: Given a call // CALL // {callTarget} // {retbuf} // {this} // {args} // create a similarly typed node that calls the tailcall dispatcher and returns // the result, as in the following: // COMMA // CALL TailCallDispatcher // ADDR ReturnAddress // &CallTargetFunc // ADDR RetValue // RetValue // If the call has type TYP_VOID, only create the CALL node. // // Arguments: // origCall - the call // callTargetStubHnd - the handle of the CallTarget function (this is a special // IL stub created by the runtime) // dispatcherHnd - the handle of the tailcall dispatcher function // // Return Value: // A node that can be used in place of the original call. // GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall, CORINFO_METHOD_HANDLE callTargetStubHnd, CORINFO_METHOD_HANDLE dispatcherHnd) { GenTreeCall* callDispatcherNode = gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo()); // The dispatcher has signature // void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue) // Add return value arg. 
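// (Three cases follow, summarized for illustration: a retbuf call gets a
// stack-allocated temp buffer whose contents are copied back to the caller's
// retbuf afterwards; a value-returning call gets a fresh address-exposed
// temp; a void call passes a null pointer for retValue.)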
GenTree* retValArg; GenTree* retVal = nullptr; unsigned int newRetLcl = BAD_VAR_NUM; GenTree* copyToRetBufNode = nullptr; if (origCall->HasRetBufArg()) { JITDUMP("Transferring retbuf\n"); GenTree* retBufArg = origCall->gtCallArgs->GetNode(); assert(info.compRetBuffArg != BAD_VAR_NUM); assert(retBufArg->OperIsLocal()); assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg); // Caller return buffer argument retBufArg can point to GC heap while the dispatcher expects // the return value argument retValArg to point to the stack. // We use a temporary stack allocated return buffer to hold the value during the dispatcher call // and copy the value back to the caller return buffer after that. unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer")); constexpr bool unsafeValueClsCheck = false; lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck); lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet(); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType)); var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet(); GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType); GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr); GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType); constexpr bool isVolatile = false; constexpr bool isCopyBlock = true; copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock); if (origCall->gtType != TYP_VOID) { retVal = gtClone(retBufArg); } } else if (origCall->gtType != TYP_VOID) { JITDUMP("Creating a new temp for the return value\n"); newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher")); if (varTypeIsStruct(origCall->gtType)) { lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false); } else { // Since we pass a reference to the return value to the dispatcher // we need to use the real return type so we can normalize it on // load when we return it. lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType; } lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType))); retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)); if (varTypeIsStruct(origCall->gtType)) { retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv()); } } else { JITDUMP("No return value so using null pointer as arg\n"); retValArg = gtNewZeroConNode(TYP_I_IMPL); } callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs); // Add callTarget callDispatcherNode->gtCallArgs = gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd), callDispatcherNode->gtCallArgs); // Add the caller's return address slot. 
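// (Illustrative note: lvaRetAddrVar is an address-exposed TYP_I_IMPL temp
// associated with this frame's return address; its address becomes the
// 'callersRetAddrSlot' argument of the dispatcher signature shown above.)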
if (lvaRetAddrVar == BAD_VAR_NUM) { lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address")); lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL; lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF)); } GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL)); callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs); GenTree* finalTree = callDispatcherNode; if (copyToRetBufNode != nullptr) { finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode); } if (origCall->gtType == TYP_VOID) { return finalTree; } assert(retVal != nullptr); finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal); // The JIT seems to want to CSE this comma and messes up multi-reg ret // values in the process. Just avoid CSE'ing this tree entirely in that // case. if (origCall->HasMultiRegRetVal()) { finalTree->gtFlags |= GTF_DONT_CSE; } return finalTree; } //------------------------------------------------------------------------ // getLookupTree: get a lookup tree // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // handleFlags - flags to set on the result node // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the lookup tree // GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); } return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle); } //------------------------------------------------------------------------ // getRuntimeLookupTree: get a tree for a runtime lookup // // Arguments: // pResolvedToken - resolved token of the call // pLookup - the lookup to get the tree for // compileTimeHandle - compile-time handle corresponding to the lookup // // Return Value: // A node representing the runtime lookup tree // GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { assert(!compIsForInlining()); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be // used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array. if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull || pRuntimeLookup->testForFixup) { // If the first condition is true, runtime lookup tree is available only via the run-time helper function. // TODO-CQ If the second or third condition is true, we are always using the slow path since we can't // introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper. 
// The long-term solution is to introduce a new node representing a runtime lookup, create instances // of that node both in the importer and here, and expand the node in lower (introducing control flow if // necessary). return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind), compileTimeHandle); } GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack)); auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* { if (!((*tree)->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(*tree, true); if (clone) { return clone; } } unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); stmts.Push(gtNewTempAssign(temp, *tree)); *tree = gtNewLclvNode(temp, lvaGetActualType(temp)); return gtNewLclvNode(temp, lvaGetActualType(temp)); }; // Apply repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { GenTree* preInd = nullptr; if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset")); } if (i != 0) { result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result); } if (pRuntimeLookup->offsets[i] != 0) { result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } assert(!pRuntimeLookup->testForNull); if (pRuntimeLookup->indirections > 0) { assert(!pRuntimeLookup->testForFixup); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result); result->gtFlags |= GTF_IND_NONFAULTING; } // Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... 
GT_COMMA(stmtN, result))) while (!stmts.Empty()) { result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result); } DISPTREE(result); return result; } //------------------------------------------------------------------------ // getVirtMethodPointerTree: get a tree for a virtual method pointer // // Arguments: // thisPtr - tree representing `this` pointer // pResolvedToken - pointer to the resolved token of the method // pCallInfo - pointer to call info // // Return Value: // A node representing the virtual method pointer GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true); GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false); GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc); return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // getTokenHandleTree: get a handle tree for a token // // Arguments: // pResolvedToken - token to get a handle for // parent - whether parent should be imported // // Return Value: // A node representing the token handle GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent) { CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo); GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } /***************************************************************************** * * Transform the given GT_CALL tree for tail call via JIT helper. */ void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call) { JITDUMP("fgMorphTailCallViaJitHelper (before):\n"); DISPTREE(call); // For the helper-assisted tail calls, we need to push all the arguments // into a single list, and then add a few extra at the beginning or end. // // For x86, the tailcall helper is defined as: // // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* // callTarget) // // Note that the special arguments are on the stack, whereas the function arguments follow // the normal convention: there might be register arguments in ECX and EDX. The stack will // look like (highest address at the top): // first normal stack argument // ... // last normal stack argument // numberOfOldStackArgs // numberOfNewStackArgs // flags // callTarget // // Each special arg is 4 bytes. // // 'flags' is a bitmask where: // 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all // callee-saved registers for tailcall functions. Note that the helper assumes // that the callee-saved registers live immediately below EBP, and must have been // pushed in this order: EDI, ESI, EBX. // 2 == call target is a virtual stub dispatch. // // The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details // on the custom calling convention. // Check for PInvoke call types that we don't handle in codegen yet. 
assert(!call->IsUnmanaged()); assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr)); // Don't support tail calling helper methods assert(call->gtCallType != CT_HELPER); // We come this route only for tail prefixed calls that cannot be dispatched as // fast tail calls assert(!call->IsImplicitTailCall()); // We want to use the following assert, but it can modify the IR in some cases, so we // can't do that in an assert. // assert(!fgCanFastTailCall(call, nullptr)); // First move the 'this' pointer (if any) onto the regular arg list. We do this because // we are going to prepend special arguments onto the argument list (for non-x86 platforms), // and thus shift where the 'this' pointer will be passed to a later argument slot. In // addition, for all platforms, we are going to change the call into a helper call. Our code // generation code for handling calls to helpers does not handle 'this' pointers. So, when we // do this transformation, we must explicitly create a null 'this' pointer check, if required, // since special 'this' pointer handling will no longer kick in. // // Some call types, such as virtual vtable calls, require creating a call address expression // that involves the "this" pointer. Lowering will sometimes create an embedded statement // to create a temporary that is assigned to the "this" pointer expression, and then use // that temp to create the call address expression. This temp creation embedded statement // will occur immediately before the "this" pointer argument, and then will be used for both // the "this" pointer argument as well as the call address expression. In the normal ordering, // the embedded statement establishing the "this" pointer temp will execute before both uses // of the temp. However, for tail calls via a helper, we move the "this" pointer onto the // normal call argument list, and insert a placeholder which will hold the call address // expression. For non-x86, things are ok, because the order of execution of these is not // altered. However, for x86, the call address expression is inserted as the *last* argument // in the argument list, *after* the "this" pointer. It will be put on the stack, and be // evaluated first. To ensure we don't end up with out-of-order temp definition and use, // for those cases where call lowering creates an embedded form temp of "this", we will // create a temp here, early, that will later get morphed correctly. if (call->gtCallThisArg != nullptr) { GenTree* thisPtr = nullptr; GenTree* objp = call->gtCallThisArg->GetNode(); call->gtCallThisArg = nullptr; if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR)) { // tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", tmp) var_types vt = objp->TypeGet(); GenTree* tmp = gtNewLclvNode(lclNum, vt); thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp); objp = thisPtr; } if (call->NeedsNullCheck()) { // clone "this" if "this" has no side effects. if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT)) { thisPtr = gtClone(objp, true); } var_types vt = objp->TypeGet(); if (thisPtr == nullptr) { // create a temp if either "this" has side effects or "this" is too complex to clone. 
// tmp = "this" unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr")); GenTree* asg = gtNewTempAssign(lclNum, objp); // COMMA(tmp = "this", deref(tmp)) GenTree* tmp = gtNewLclvNode(lclNum, vt); GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB); asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck); // COMMA(COMMA(tmp = "this", deref(tmp)), tmp) thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt)); } else { // thisPtr = COMMA(deref("this"), "this") GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB); thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true)); } call->gtFlags &= ~GTF_CALL_NULLCHECK; } else { thisPtr = objp; } // TODO-Cleanup: we leave it as a virtual stub call to // use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here // and change `LowerCall` to recognize it as a direct call. // During rationalization tmp="this" and null check will // materialize as embedded stmts in right execution order. assert(thisPtr != nullptr); call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs); } // Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will // append to the list. GenTreeCall::Use** ppArg = &call->gtCallArgs; for (GenTreeCall::Use& use : call->Args()) { ppArg = &use.NextRef(); } assert(ppArg != nullptr); assert(*ppArg == nullptr); unsigned nOldStkArgsWords = (compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES; GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate. // The constant will be replaced. GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the flags. // The constant will be replaced. GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg1); ppArg = &((*ppArg)->NextRef()); // Inject a placeholder for the real call target that the Lowering phase will generate. // The constant will be replaced. GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL); *ppArg = gtNewCallArgs(arg0); // It is now a varargs tail call. call->gtCallMoreFlags |= GTF_CALL_M_VARARGS; call->gtFlags &= ~GTF_CALL_POP_ARGS; // The function is responsible for doing an explicit null check when it is necessary. assert(!call->NeedsNullCheck()); JITDUMP("fgMorphTailCallViaJitHelper (after):\n"); DISPTREE(call); } //------------------------------------------------------------------------ // fgGetStubAddrArg: Return the virtual stub address for the given call. // // Notes: // the JIT must place the address of the stub used to load the call target, // the "stub indirection cell", in a special call argument with a special register. // // Arguments: // call - a call that needs virtual stub dispatching. // // Return Value: // addr tree with set register requirements. 
// GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call) { assert(call->IsVirtualStub()); GenTree* stubAddrArg; if (call->gtCallType == CT_INDIRECT) { stubAddrArg = gtClone(call->gtCallAddr, true); } else { assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT); ssize_t addr = ssize_t(call->gtStubCallStubAddr); stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR); #ifdef DEBUG stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif } assert(stubAddrArg != nullptr); stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg()); return stubAddrArg; } //------------------------------------------------------------------------------ // fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that // corresponds to the argument to a recursive call. // // Notes: // Due to non-standard args this is not just fgArgTabEntry::argNum. // For example, in R2R compilations we will have added a non-standard // arg for the R2R indirection cell. // // Arguments: // argTabEntry - the arg // unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry) { fgArgInfo* argInfo = call->fgArgInfo; unsigned argCount = argInfo->ArgCount(); fgArgTabEntry** argTable = argInfo->ArgTable(); unsigned numToRemove = 0; for (unsigned i = 0; i < argCount; i++) { fgArgTabEntry* arg = argTable[i]; // Late added args add extra args that do not map to IL parameters and that we should not reassign. if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate()) continue; if (arg->argNum < argTabEntry->argNum) numToRemove++; } return argTabEntry->argNum - numToRemove; } //------------------------------------------------------------------------------ // fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop. // // // Arguments: // block - basic block ending with a recursive fast tail call // recursiveTailCall - recursive tail call to transform // // Notes: // The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop. void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall) { assert(recursiveTailCall->IsTailCallConvertibleToLoop()); Statement* lastStmt = block->lastStmt(); assert(recursiveTailCall == lastStmt->GetRootNode()); // Transform recursive tail call into a loop. Statement* earlyArgInsertionPoint = lastStmt; const DebugInfo& callDI = lastStmt->GetDebugInfo(); // Hoist arg setup statement for the 'this' argument. GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg; if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode()) { Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt); } // All arguments whose trees may involve caller parameter local variables need to be assigned to temps first; // then the temps need to be assigned to the method parameters. This is done so that the caller // parameters are not re-assigned before call arguments depending on them are evaluated. // tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of // where the next temp or parameter assignment should be inserted. // In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first // while the second call argument (const 1) doesn't. 
// Basic block before tail recursion elimination: // ***** BB04, stmt 1 (top level) // [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013) // [000033] --C - G------ - \--* call void RecursiveMethod // [000030] ------------ | / --* const int - 1 // [000031] ------------arg0 in rcx + --* +int // [000029] ------------ | \--* lclVar int V00 arg1 // [000032] ------------arg1 in rdx \--* const int 1 // // // Basic block after tail recursion elimination : // ***** BB04, stmt 1 (top level) // [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000030] ------------ | / --* const int - 1 // [000031] ------------ | / --* +int // [000029] ------------ | | \--* lclVar int V00 arg1 // [000050] - A---------- \--* = int // [000049] D------N---- \--* lclVar int V02 tmp0 // // ***** BB04, stmt 2 (top level) // [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000052] ------------ | / --* lclVar int V02 tmp0 // [000054] - A---------- \--* = int // [000053] D------N---- \--* lclVar int V00 arg0 // ***** BB04, stmt 3 (top level) // [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? ) // [000032] ------------ | / --* const int 1 // [000057] - A---------- \--* = int // [000056] D------N---- \--* lclVar int V01 arg1 Statement* tmpAssignmentInsertionPoint = lastStmt; Statement* paramAssignmentInsertionPoint = lastStmt; // Process early args. They may contain both setup statements for late args and actual args. // Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum // below has the correct second argument. int earlyArgIndex = (thisArg == nullptr) ? 0 : 1; for (GenTreeCall::Use& use : recursiveTailCall->Args()) { GenTree* earlyArg = use.GetNode(); if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode()) { if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0) { // This is a setup node so we need to hoist it. Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI); fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt); } else { // This is an actual argument that needs to be assigned to the corresponding caller parameter. fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } } } earlyArgIndex++; } // Process late args. int lateArgIndex = 0; for (GenTreeCall::Use& use : recursiveTailCall->LateArgs()) { // A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter. 
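// (E.g., mirroring the diagram above: a late arg whose tree reads a caller parameter is first stored to a temp, and the temp is then assigned to the parameter, so parameters are not overwritten before arguments that depend on them are evaluated.)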
GenTree* lateArg = use.GetNode(); fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex); // Late-added non-standard args are extra args that are not passed as locals, so skip those if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate()) { Statement* paramAssignStmt = fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry, fgGetArgTabEntryParameterLclNum(recursiveTailCall, curArgTabEntry), block, callDI, tmpAssignmentInsertionPoint, paramAssignmentInsertionPoint); if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr)) { // All temp assignments will happen before the first param assignment. tmpAssignmentInsertionPoint = paramAssignStmt; } } lateArgIndex++; } // If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that // compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that // block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here. if (!info.compIsStatic && (lvaArg0Var != info.compThisArg)) { var_types thisType = lvaTable[info.compThisArg].TypeGet(); GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType); GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType)); Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt); } // If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog // but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization // for all non-parameter IL locals as well as temp structs with GC fields. // Liveness phase will remove unnecessary initializations. if (info.compInitMem || compSuppressedZeroInit) { unsigned varNum; LclVarDsc* varDsc; for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++) { #if FEATURE_FIXED_OUT_ARGS if (varNum == lvaOutgoingArgSpaceVar) { continue; } #endif // FEATURE_FIXED_OUT_ARGS if (!varDsc->lvIsParam) { var_types lclType = varDsc->TypeGet(); bool isUserLocal = (varNum < info.compLocalsCount); bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr()); bool hadSuppressedInit = varDsc->lvSuppressedZeroInit; if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit) { GenTree* lcl = gtNewLclvNode(varNum, lclType); GenTree* init = nullptr; if (varTypeIsStruct(lclType)) { const bool isVolatile = false; const bool isCopyBlock = false; init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock); init = fgMorphInitBlock(init); } else { GenTree* zero = gtNewZeroConNode(genActualType(lclType)); init = gtNewAssignNode(lcl, zero); } Statement* initStmt = gtNewStmt(init, callDI); fgInsertStmtBefore(block, lastStmt, initStmt); } } } } // Remove the call fgRemoveStmt(block, lastStmt); // Set the loop edge. if (opts.IsOSR()) { // Todo: this may not look like a viable loop header. // Might need the moral equivalent of a scratch BB. block->bbJumpDest = fgEntryBB; } else { // Ensure we have a scratch block and then target the next // block. Loop detection needs to see a pred out of the loop, // so mark the scratch block BBF_DONT_REMOVE to prevent empty // block removal on it. fgEnsureFirstBBisScratch(); fgFirstBB->bbFlags |= BBF_DONT_REMOVE; block->bbJumpDest = fgFirstBB->bbNext; } // Finish hooking things up. 
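// (Illustrative CFG effect: the block that ended in the recursive tail call, e.g. BB04 [BBJ_RETURN], now falls into the loop by becoming BB04 [BBJ_ALWAYS] targeting the loop head chosen above.)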
block->bbJumpKind = BBJ_ALWAYS; fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } //------------------------------------------------------------------------------ // fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter. // // // Arguments: // arg - argument to assign // argTabEntry - argument table entry corresponding to arg // lclParamNum - the lcl num of the parameter // block - basic block the call is in // callDI - debug info of the call // tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary) // paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted // // Return Value: // parameter assignment statement if one was inserted; nullptr otherwise. Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg, fgArgTabEntry* argTabEntry, unsigned lclParamNum, BasicBlock* block, const DebugInfo& callDI, Statement* tmpAssignmentInsertionPoint, Statement* paramAssignmentInsertionPoint) { // Call arguments should be assigned to temps first and then the temps should be assigned to parameters because // some argument trees may reference parameters directly. GenTree* argInTemp = nullptr; bool needToAssignParameter = true; // TODO-CQ: enable calls with struct arguments passed in registers. noway_assert(!varTypeIsStruct(arg->TypeGet())); if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl()) { // The argument is already assigned to a temp or is a const. argInTemp = arg; } else if (arg->OperGet() == GT_LCL_VAR) { unsigned lclNum = arg->AsLclVar()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (!varDsc->lvIsParam) { // The argument is a non-parameter local so it doesn't need to be assigned to a temp. argInTemp = arg; } else if (lclNum == lclParamNum) { // The argument is the same parameter local that we were about to assign so // we can skip the assignment. needToAssignParameter = false; } } // TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve // any caller parameters. Some common cases are handled above but we may be able to eliminate // more temp assignments. Statement* paramAssignStmt = nullptr; if (needToAssignParameter) { if (argInTemp == nullptr) { // The argument is not assigned to a temp. We need to create a new temp and insert an assignment. // TODO: we can avoid a temp assignment if we can prove that the argument tree // doesn't involve any caller parameters. unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp")); lvaTable[tmpNum].lvType = arg->gtType; GenTree* tempSrc = arg; GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType); GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc); Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI); fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt); argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType); } // Now assign the temp to the parameter. const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum); assert(paramDsc->lvIsParam); GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType); GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp); paramAssignStmt = gtNewStmt(paramAssignNode, callDI); fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt); } return paramAssignStmt; } /***************************************************************************** * * Transform the given GT_CALL tree for code generation. 
*/ GenTree* Compiler::fgMorphCall(GenTreeCall* call) { if (call->CanTailCall()) { GenTree* newNode = fgMorphPotentialTailCall(call); if (newNode != nullptr) { return newNode; } assert(!call->CanTailCall()); #if FEATURE_MULTIREG_RET if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet())) { // The tail call has been rejected so we must finish the work deferred // by impFixupCallStructReturn for multi-reg-returning calls and transform // ret call // into // temp = call // ret temp // Force re-evaluating the argInfo as the return argument has changed. call->ResetArgInfo(); // Create a new temp. unsigned tmpNum = lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call).")); lvaTable[tmpNum].lvIsMultiRegRet = true; CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd; assert(structHandle != NO_CLASS_HANDLE); const bool unsafeValueClsCheck = false; lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck); var_types structType = lvaTable[tmpNum].lvType; GenTree* dst = gtNewLclvNode(tmpNum, structType); GenTree* assg = gtNewAssignNode(dst, call); assg = fgMorphTree(assg); // Create the assignment statement and insert it before the current statement. Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo()); fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt); // Return the temp. GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); result->gtFlags |= GTF_DONT_CSE; compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call #ifdef DEBUG if (verbose) { printf("\nInserting assignment of a multi-reg call result to a temp:\n"); gtDispStmt(assgStmt); } result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG return result; } #endif } if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR) #ifdef FEATURE_READYTORUN || call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR) #endif ) && (call == fgMorphStmt->GetRootNode())) { // This is a call to CORINFO_HELP_VIRTUAL_FUNC_PTR with an ignored result. // Transform it into a null check. GenTree* thisPtr = call->gtCallArgs->GetNode(); GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB); return fgMorphTree(nullCheck); } noway_assert(call->gtOper == GT_CALL); // // Only count calls once (only in the global morph phase) // if (fgGlobalMorph) { if (call->gtCallType == CT_INDIRECT) { optCallCount++; optIndirectCallCount++; } else if (call->gtCallType == CT_USER_FUNC) { optCallCount++; if (call->IsVirtual()) { optIndirectCallCount++; } } } // Couldn't inline - remember that this BB contains method calls // Mark the block as a GC safe point for the call if possible. // In the event the call indicates the block isn't a GC safe point // and the call is unmanaged with a GC transition suppression request // then insert a GC poll. CLANG_FORMAT_COMMENT_ANCHOR; if (IsGcSafePoint(call)) { compCurBB->bbFlags |= BBF_GC_SAFE_POINT; } // Regardless of the state of the basic block with respect to GC safe point, // we will always insert a GC Poll for scenarios involving a suppressed GC // transition. Only mark the block for GC Poll insertion on the first morph. 
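// (Illustrative: a [SuppressGCTransition] P/Invoke keeps the thread in cooperative mode across the native call, so the block is flagged below and OMF_NEEDS_GCPOLLS causes a safepoint poll to be inserted in a later phase.)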
if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition()) { compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT); optMethodFlags |= OMF_NEEDS_GCPOLLS; } // Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag // // We need to do these before the arguments are morphed if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)) { // See if this is foldable GenTree* optTree = gtFoldExprCall(call); // If we optimized, morph the result if (optTree != call) { return fgMorphTree(optTree); } } compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call /* Process the "normal" argument list */ call = fgMorphArgs(call); noway_assert(call->gtOper == GT_CALL); // Should we expand this virtual method call target early here? // if (call->IsExpandedEarly() && call->IsVirtualVtable()) { // We only expand the Vtable Call target once in the global morph phase if (fgGlobalMorph) { assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once call->gtControlExpr = fgExpandVirtualVtableCallTarget(call); } // We always have to morph or re-morph the control expr // call->gtControlExpr = fgMorphTree(call->gtControlExpr); // Propagate any gtFlags into the call call->gtFlags |= call->gtControlExpr->gtFlags; } // Morph stelem.ref helper call to store a null value, into a store into an array without the helper. // This needs to be done after the arguments are morphed to ensure constant propagation has already taken place. if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) && (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST))) { GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode(); if (value->IsIntegralConst(0)) { assert(value->OperGet() == GT_CNS_INT); GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode(); GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode(); // Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy // the spill trees as well if necessary. GenTreeOp* argSetup = nullptr; for (GenTreeCall::Use& use : call->Args()) { GenTree* const arg = use.GetNode(); if (arg->OperGet() != GT_ASG) { continue; } assert(arg != arr); assert(arg != index); arg->gtFlags &= ~GTF_LATE_ARG; GenTree* op1 = argSetup; if (op1 == nullptr) { op1 = gtNewNothingNode(); #if DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg); #if DEBUG argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } #ifdef DEBUG auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult { (*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; return WALK_CONTINUE; }; fgWalkTreePost(&arr, resetMorphedFlag); fgWalkTreePost(&index, resetMorphedFlag); fgWalkTreePost(&value, resetMorphedFlag); #endif // DEBUG GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index); GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value); GenTree* result = fgMorphTree(arrStore); if (argSetup != nullptr) { result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result); #if DEBUG result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return result; } } if (call->IsNoReturn()) { // // If we know that the call does not return then we can set fgRemoveRestOfBlock // to remove all subsequent statements and change the call's basic block to BBJ_THROW. // As a result the compiler won't need to preserve live registers across the call. 
// // This isn't needed for tail calls as there shouldn't be any code after the call anyway. // Besides, the tail call code is part of the epilog and converting the block to // BBJ_THROW would result in the tail call being dropped as the epilog is generated // only for BBJ_RETURN blocks. // if (!call->IsTailCall()) { fgRemoveRestOfBlock = true; } } return call; } /***************************************************************************** * * Expand and return the call target address for a VirtualCall * The code here should match that generated by LowerVirtualVtableCall */ GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call) { GenTree* result; JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper)); noway_assert(call->gtCallType == CT_USER_FUNC); // get a reference to the thisPtr being passed fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0); GenTree* thisPtr = thisArgTabEntry->GetNode(); // fgMorphArgs must enforce this invariant by creating a temp // assert(thisPtr->OperIsLocal()); // Make a copy of the thisPtr by cloning // thisPtr = gtClone(thisPtr, true); noway_assert(thisPtr != nullptr); // Get hold of the vtable offset unsigned vtabOffsOfIndirection; unsigned vtabOffsAfterIndirection; bool isRelative; info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection, &isRelative); // Dereference the this pointer to obtain the method table; it is called vtab below GenTree* vtab; assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr); vtab->gtFlags |= GTF_IND_INVARIANT; // Get the appropriate vtable chunk if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK) { // Note this isRelative code path is currently never executed // as the VM doesn't ever return: isRelative == true // if (isRelative) { // MethodTable offset is a relative pointer. // // An additional temporary variable is used to store the virtual table pointer. 
// The address of the method is obtained by the following computations: // // Save the relative offset to tmp (vtab is the virtual table pointer, vtabOffsOfIndirection is the offset of // vtable-1st-level-indirection): // tmp = vtab // // Save the address of the method to result (vtabOffsAfterIndirection is the offset of // vtable-2nd-level-indirection): // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]] // // // When isRelative is true we need to set up two temporary variables // var1 = vtab // var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] // result = [var2] + var2 // unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab")); unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative")); GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab // [tmp + vtabOffsOfIndirection] GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false); tmpTree1->gtFlags |= GTF_IND_NONFAULTING; tmpTree1->gtFlags |= GTF_IND_INVARIANT; // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection GenTree* tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL), gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL)); // var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection] tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1); GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression> // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2] result->gtFlags |= GTF_IND_NONFAULTING; result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2 // Now stitch together the two assignments and the calculation of result into a single tree GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result); result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree); } else { // result = [vtab + vtabOffsOfIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL)); result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; result->gtFlags |= GTF_IND_INVARIANT; } } else { result = vtab; assert(!isRelative); } if (!isRelative) { // Load the function address // result = [result + vtabOffsAfterIndirection] result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL)); // This last indirection is not invariant, but is non-faulting result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false); result->gtFlags |= GTF_IND_NONFAULTING; } return result; } /***************************************************************************** * * Transform the given constant tree for code generation. */ GenTree* Compiler::fgMorphConst(GenTree* tree) { assert(tree->OperIsConst()); /* Clear any exception flags or other unnecessary flags * that may have been set before folding this node to a constant */ tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS); if (!tree->OperIs(GT_CNS_STR)) { return tree; } if (tree->AsStrCon()->IsStringEmptyField()) { LPVOID pValue; InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue); return fgMorphTree(gtNewStringLiteralNode(iat, pValue)); } // TODO-CQ: Do this for compCurBB->isRunRarely(). 
Doing that currently will // guarantee slow performance for that block. Instead, cache the return value // of CORINFO_HELP_STRCNS and go to the cache first, giving reasonable perf. bool useLazyStrCns = false; if (compCurBB->bbJumpKind == BBJ_THROW) { useLazyStrCns = true; } else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall()) { // Quick check: if the root node of the current statement happens to be a noreturn call. GenTreeCall* call = compCurStmt->GetRootNode()->AsCall(); useLazyStrCns = call->IsNoReturn() || fgIsThrow(call); } if (useLazyStrCns) { CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd); if (helper != CORINFO_HELP_UNDEF) { // For un-important blocks, we want to construct the string lazily GenTreeCall::Use* args; if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE) { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT)); } else { args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT), gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd)); } tree = gtNewHelperCallNode(helper, TYP_REF, args); return fgMorphTree(tree); } } assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd)); LPVOID pValue; InfoAccessType iat = info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue); tree = gtNewStringLiteralNode(iat, pValue); return fgMorphTree(tree); } //------------------------------------------------------------------------ // fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar. // // Arguments: // obj - the obj node. // destroyNodes -- destroy nodes that are optimized away // // Return value: // GenTreeLclVar if the obj can be replaced by it, null otherwise. // // Notes: // TODO-CQ: currently this transformation is done only under copy block, // but it is beneficial to do it for each OBJ node. However, `PUT_ARG_STACK` // for some platforms does not expect a struct `LCL_VAR` as a source, so // it needs more work. // GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes) { if (opts.OptimizationEnabled()) { GenTree* op1 = obj->Addr(); assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity"); if (op1->OperIs(GT_ADDR)) { GenTreeUnOp* addr = op1->AsUnOp(); GenTree* addrOp = addr->gtGetOp1(); if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR)) { GenTreeLclVar* lclVar = addrOp->AsLclVar(); ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout(); ClassLayout* objLayout = obj->GetLayout(); if (ClassLayout::AreCompatible(lclVarLayout, objLayout)) { #ifdef DEBUG CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle(); assert(objClsHandle != NO_CLASS_HANDLE); if (verbose) { CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar); printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar)); printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different")); } #endif // Keep the DONT_CSE flag in sync // (as the addr always marks it for its op1) lclVar->gtFlags &= ~GTF_DONT_CSE; lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE); if (destroyNodes) { DEBUG_DESTROY_NODE(obj); DEBUG_DESTROY_NODE(addr); } return lclVar; } } } } return nullptr; } /***************************************************************************** * * Transform the given GTK_LEAF tree for code generation. 
*/ GenTree* Compiler::fgMorphLeaf(GenTree* tree) { assert(tree->OperKind() & GTK_LEAF); if (tree->gtOper == GT_LCL_VAR) { const bool forceRemorph = false; return fgMorphLocalVar(tree, forceRemorph); } else if (tree->gtOper == GT_LCL_FLD) { if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed()) { tree->gtFlags |= GTF_GLOB_REF; } #ifdef TARGET_X86 if (info.compIsVarArgs) { GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(), tree->AsLclFld()->GetLclOffs()); if (newTree != nullptr) { if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0)) { newTree->SetOper(GT_IND); } return newTree; } } #endif // TARGET_X86 } else if (tree->gtOper == GT_FTN_ADDR) { GenTreeFptrVal* fptrValTree = tree->AsFptrVal(); // A function pointer address is being used. Let the VM know if this is the // target of a Delegate or a raw function pointer. bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget; CORINFO_CONST_LOOKUP addrInfo; #ifdef FEATURE_READYTORUN if (fptrValTree->gtEntryPoint.addr != nullptr) { addrInfo = fptrValTree->gtEntryPoint; } else #endif { info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo); } GenTree* indNode = nullptr; switch (addrInfo.accessType) { case IAT_PPVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true); // Add the second indirection indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode); // This indirection won't cause an exception. indNode->gtFlags |= GTF_IND_NONFAULTING; // This indirection also is invariant. indNode->gtFlags |= GTF_IND_INVARIANT; break; case IAT_PVALUE: indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true); break; case IAT_VALUE: // Refer to gtNewIconHandleNode() as the template for constructing a constant handle // tree->SetOper(GT_CNS_INT); tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle)); tree->gtFlags |= GTF_ICON_FTN_ADDR; break; default: noway_assert(!"Unknown addrInfo.accessType"); } if (indNode != nullptr) { DEBUG_DESTROY_NODE(tree); tree = fgMorphTree(indNode); } } return tree; } void Compiler::fgAssignSetVarDef(GenTree* tree) { GenTreeLclVarCommon* lclVarCmnTree; bool isEntire = false; if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire)) { if (isEntire) { lclVarCmnTree->gtFlags |= GTF_VAR_DEF; } else { // We consider partial definitions to be modeled as uses followed by definitions. // This captures the idea that preceding defs are not necessarily made redundant // by this definition. lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG); } } } //------------------------------------------------------------------------ // fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment // // Arguments: // tree - The block assignment to be possibly morphed // // Return Value: // The modified tree if successful, nullptr otherwise. // // Assumptions: // 'tree' must be a block assignment. // // Notes: // If successful, this method always returns the incoming tree, modifying only // its arguments. // GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree) { // This must be a block assignment. 
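// (E.g. an ASG whose destination is a BLK/OBJ/IND of a local, or the local itself; the code below attempts to retype it as a single scalar assignment, see the GT_ASG diagram further down.)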
noway_assert(tree->OperIsBlkOp()); var_types asgType = tree->TypeGet(); GenTree* asg = tree; GenTree* dest = asg->gtGetOp1(); GenTree* src = asg->gtGetOp2(); unsigned destVarNum = BAD_VAR_NUM; LclVarDsc* destVarDsc = nullptr; GenTree* destLclVarTree = nullptr; bool isCopyBlock = asg->OperIsCopyBlkOp(); bool isInitBlock = !isCopyBlock; unsigned size = 0; CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE; if (dest->gtEffectiveVal()->OperIsBlk()) { GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk(); size = lhsBlk->Size(); if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree)) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); } if (lhsBlk->OperGet() == GT_OBJ) { clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle(); } } else { // Is this an enregisterable struct that is already a simple assignment? // This can happen if we are re-morphing. // Note that we won't do this straightaway if this is a SIMD type, since it // may be a promoted lclVar (sometimes we promote the individual float fields of // fixed-size SIMD). if (dest->OperGet() == GT_IND) { noway_assert(asgType != TYP_STRUCT); if (varTypeIsStruct(asgType)) { destLclVarTree = fgIsIndirOfAddrOfLocal(dest); } if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR)) { fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/); dest->gtFlags |= GTF_DONT_CSE; return tree; } } else { noway_assert(dest->OperIsLocal()); destLclVarTree = dest; } if (destLclVarTree != nullptr) { destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum(); destVarDsc = lvaGetDesc(destVarNum); if (asgType == TYP_STRUCT) { clsHnd = destVarDsc->GetStructHnd(); size = destVarDsc->lvExactSize; } } if (asgType != TYP_STRUCT) { size = genTypeSize(asgType); } } if (size == 0) { return nullptr; } if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } if (src->IsCall() || src->OperIsSIMD()) { // Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413. return nullptr; } if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet())) { // // See if we can do a simple transformation: // // GT_ASG <TYP_size> // / \. // GT_IND GT_IND or CNS_INT // | | // [dest] [src] // if (asgType == TYP_STRUCT) { // It is possible to use `initobj` to init a primitive type on the stack, // like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`; // in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)` // and this code path transforms it into `ASG ref(LCL_VAR ref, 0)` because it is not a real // struct assignment. if (size == REGSIZE_BYTES) { if (clsHnd == NO_CLASS_HANDLE) { // A register-sized cpblk can be treated as an integer assignment. 
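// (Illustrative: an 8-byte struct copy on a 64-bit target with no class handle becomes a plain TYP_I_IMPL assignment; when a class handle is available, getClassGClayout below picks TYP_REF/TYP_BYREF for a GC slot so the proper write barrier can be used.)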
asgType = TYP_I_IMPL; } else { BYTE gcPtr; info.compCompHnd->getClassGClayout(clsHnd, &gcPtr); asgType = getJitGCType(gcPtr); } } else { switch (size) { case 1: asgType = TYP_BYTE; break; case 2: asgType = TYP_SHORT; break; #ifdef TARGET_64BIT case 4: asgType = TYP_INT; break; #endif // TARGET_64BIT } } } } GenTree* srcLclVarTree = nullptr; LclVarDsc* srcVarDsc = nullptr; if (isCopyBlock) { if (src->OperGet() == GT_LCL_VAR) { srcLclVarTree = src; srcVarDsc = lvaGetDesc(src->AsLclVarCommon()); } else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree)) { srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon()); } if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted) { // Let fgMorphCopyBlock handle it. return nullptr; } } if (asgType != TYP_STRUCT) { noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType)); // For initBlk, a non-constant source is not going to allow us to fiddle // with the bits to create a single assignment. // Nor do we (for now) support transforming an InitBlock of SIMD type, unless // it is a direct assignment to a lclVar and the value is zero. if (isInitBlock) { if (!src->IsConstInitVal()) { return nullptr; } if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr))) { return nullptr; } } if (destVarDsc != nullptr) { // Kill everything about dest if (optLocalAssertionProp) { if (optAssertionCount > 0) { fgKillDependentAssertions(destVarNum DEBUGARG(tree)); } } // A previous incarnation of this code also required the local not to be // address-exposed(=taken). That seems orthogonal to the decision of whether // to do field-wise assignments: being address-exposed will cause it to be // "dependently" promoted, so it will be in the right memory location. One possible // further reason for avoiding field-wise stores is that the struct might have alignment-induced // holes, whose contents could be meaningful in unsafe code. If we decide that's a valid // concern, then we could compromise and say that being address-exposed plus having fields that do not // completely cover the memory of the struct prevents field-wise assignments. The same situation exists // for the "src" decision. if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted) { // Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.) return nullptr; } else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc))) { // Use the dest local var directly, as well as its type. dest = destLclVarTree; asgType = destVarDsc->lvType; // If the block operation had been a write to a local var of a small int type, // of the exact size of the small int type, and the var is NormalizeOnStore, // we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't // have done that normalization. If we're now making it into an assignment, // the NormalizeOnStore will work, and it can be a full def. if (destVarDsc->lvNormalizeOnStore()) { dest->gtFlags &= (~GTF_VAR_USEASG); } } else { // Could be a non-promoted struct, or a floating point type local, or // an int subject to a partial write. Don't enregister. lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); // Mark the local var tree as a definition point of the local. destLclVarTree->gtFlags |= GTF_VAR_DEF; if (size < destVarDsc->lvExactSize) { // If it's not a full-width assignment.... 
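// (e.g. storing 4 bytes into an 8-byte local leaves the remaining bytes untouched, so per fgAssignSetVarDef above this is a partial def: a use followed by a def, hence GTF_VAR_USEASG in addition to GTF_VAR_DEF.)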
destLclVarTree->gtFlags |= GTF_VAR_USEASG; } if (dest == destLclVarTree) { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); dest = gtNewIndir(asgType, addr); } } } // Check to ensure we don't have a reducible *(& ... ) if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR) { // If dest is an Indir or Block, and it has a child that is an Addr node // GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR // Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'? // GenTree* destOp = addrNode->gtGetOp1(); var_types destOpType = destOp->TypeGet(); // We can if we have a primitive integer type and the sizes are exactly the same. // if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType)))) { dest = destOp; asgType = destOpType; } } if (dest->gtEffectiveVal()->OperIsIndir()) { // If we have no information about the destination, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. if (!fgIsIndirOfAddrOfLocal(dest)) { dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); tree->gtFlags |= GTF_GLOB_REF; } dest->SetIndirExceptionFlags(this); tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT); } if (isCopyBlock) { if (srcVarDsc != nullptr) { // Handled above. assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted); if (!varTypeIsFloating(srcLclVarTree->TypeGet()) && size == genTypeSize(genActualType(srcLclVarTree->TypeGet()))) { // Use the src local var directly. src = srcLclVarTree; } else { // The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar)) // or indir(lclVarAddr) so it must be on the stack. unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum(); lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping)); GenTree* srcAddr; if (src == srcLclVarTree) { srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src); src = gtNewOperNode(GT_IND, asgType, srcAddr); } else { assert(src->OperIsIndir()); } } } if (src->OperIsIndir()) { if (!fgIsIndirOfAddrOfLocal(src)) { // If we have no information about the src, we have to assume it could // live anywhere (not just in the GC heap). // Mark the GT_IND node so that we use the correct write barrier helper in case // the field is a GC ref. src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE); } src->SetIndirExceptionFlags(this); } } else // InitBlk { #ifdef FEATURE_SIMD if (varTypeIsSIMD(asgType)) { assert(!isCopyBlock); // Else we would have returned the tree above. noway_assert(src->IsIntegralConst(0)); noway_assert(destVarDsc != nullptr); src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size); } else #endif { if (src->OperIsInitVal()) { src = src->gtGetOp1(); } assert(src->IsCnsIntOrI()); // This will mutate the integer constant, in place, to be the correct // value for the type we are using in the assignment. src->AsIntCon()->FixupInitBlkValue(asgType); } } // Ensure that the dest is set up appropriately. if (dest->gtEffectiveVal()->OperIsIndir()) { dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/); } // Ensure that the rhs is set up appropriately. if (isCopyBlock) { src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/); } // Set the lhs and rhs on the assignment. 
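// (At this point 'dest', 'src' and 'asgType' agree; for instance a zero-init of a SIMD local has become ASG simd(LCL_VAR, SIMDIntrinsicInit(0)) and a register-sized copy is a plain scalar assignment.)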
        if (dest != tree->AsOp()->gtOp1)
        {
            asg->AsOp()->gtOp1 = dest;
        }
        if (src != asg->AsOp()->gtOp2)
        {
            asg->AsOp()->gtOp2 = src;
        }

        asg->ChangeType(asgType);
        dest->gtFlags |= GTF_DONT_CSE;
        asg->gtFlags &= ~GTF_EXCEPT;
        asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT);
        // Un-set GTF_REVERSE_OPS, and it will be set later if appropriate.
        asg->gtFlags &= ~GTF_REVERSE_OPS;

#ifdef DEBUG
        if (verbose)
        {
            printf("fgMorphOneAsgBlock (after):\n");
            gtDispTree(tree);
        }
#endif
        return tree;
    }

    return nullptr;
}

//------------------------------------------------------------------------
// fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree
// to a tree of promoted field initialization assignments.
//
// Arguments:
//    destLclNode - The destination LclVar node
//    initVal - The initialization value
//    blockSize - The number of bytes to initialize
//
// Return Value:
//    A tree that performs field by field initialization of the destination
//    struct variable if various conditions are met, nullptr otherwise.
//
// Notes:
//    This transforms a single block initialization assignment like:
//
//      *  ASG       struct (init)
//      +--*  BLK(12)   struct
//      |  \--*  ADDR      long
//      |     \--*  LCL_VAR   struct(P) V02 loc0
//      |     \--*    int    V02.a (offs=0x00) -> V06 tmp3
//      |     \--*    ubyte  V02.c (offs=0x04) -> V07 tmp4
//      |     \--*    float  V02.d (offs=0x08) -> V08 tmp5
//      \--*  INIT_VAL  int
//         \--*  CNS_INT   int    42
//
//    into a COMMA tree of assignments that initialize each promoted struct
//    field:
//
//      *  COMMA     void
//      +--*  COMMA     void
//      |  +--*  ASG       int
//      |  |  +--*  LCL_VAR   int    V06 tmp3
//      |  |  \--*  CNS_INT   int    0x2A2A2A2A
//      |  \--*  ASG       ubyte
//      |     +--*  LCL_VAR   ubyte  V07 tmp4
//      |     \--*  CNS_INT   int    42
//      \--*  ASG       float
//         +--*  LCL_VAR   float  V08 tmp5
//         \--*  CNS_DBL   float  1.5113661732714390e-13
//
GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize)
{
    assert(destLclNode->OperIs(GT_LCL_VAR));
    LclVarDsc* destLclVar = lvaGetDesc(destLclNode);
    assert(varTypeIsStruct(destLclVar->TypeGet()));
    assert(destLclVar->lvPromoted);

    if (blockSize == 0)
    {
        JITDUMP(" size is zero or unknown.\n");
        return nullptr;
    }

    if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles)
    {
        JITDUMP(" dest is address exposed and contains holes.\n");
        return nullptr;
    }

    if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles)
    {
        // TODO-1stClassStructs: there are no reasons for this pessimization, delete it.
        JITDUMP(" dest has custom layout and contains holes.\n");
        return nullptr;
    }

    if (destLclVar->lvExactSize != blockSize)
    {
        JITDUMP(" dest size mismatch.\n");
        return nullptr;
    }

    if (!initVal->OperIs(GT_CNS_INT))
    {
        JITDUMP(" source is not constant.\n");
        return nullptr;
    }

    const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL;

    if (initPattern != 0)
    {
        for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
        {
            LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i);

            if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet()))
            {
                // Cannot initialize GC or SIMD types with a non-zero constant.
                // The former is completely bogus. The latter restriction could be
                // lifted by supporting non-zero SIMD constants or by generating
                // field initialization code that converts an integer constant to
                // the appropriate SIMD value. Unlikely to be very useful, though.
JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n"); return nullptr; } } } JITDUMP(" using field by field initialization.\n"); GenTree* tree = nullptr; for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i) { unsigned fieldLclNum = destLclVar->lvFieldLclStart + i; LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum); GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet()); // If it had been labeled a "USEASG", assignments to the individual promoted fields are not. dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG)); GenTree* src; switch (dest->TypeGet()) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: case TYP_SHORT: case TYP_USHORT: // Promoted fields are expected to be "normalize on load". If that changes then // we may need to adjust this code to widen the constant correctly. assert(fieldDesc->lvNormalizeOnLoad()); FALLTHROUGH; case TYP_INT: { int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1; src = gtNewIconNode(static_cast<int32_t>(initPattern & mask)); break; } case TYP_LONG: src = gtNewLconNode(initPattern); break; case TYP_FLOAT: float floatPattern; memcpy(&floatPattern, &initPattern, sizeof(floatPattern)); src = gtNewDconNode(floatPattern, dest->TypeGet()); break; case TYP_DOUBLE: double doublePattern; memcpy(&doublePattern, &initPattern, sizeof(doublePattern)); src = gtNewDconNode(doublePattern, dest->TypeGet()); break; case TYP_REF: case TYP_BYREF: #ifdef FEATURE_SIMD case TYP_SIMD8: case TYP_SIMD12: case TYP_SIMD16: case TYP_SIMD32: #endif // FEATURE_SIMD assert(initPattern == 0); src = gtNewIconNode(0, dest->TypeGet()); break; default: unreached(); } GenTree* asg = gtNewAssignNode(dest, src); if (optLocalAssertionProp) { optAssertionGen(asg); } if (tree != nullptr) { tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg); } else { tree = asg; } } return tree; } //------------------------------------------------------------------------ // fgMorphGetStructAddr: Gets the address of a struct object // // Arguments: // pTree - the parent's pointer to the struct object node // clsHnd - the class handle for the struct type // isRValue - true if this is a source (not dest) // // Return Value: // Returns the address of the struct value, possibly modifying the existing tree to // sink the address below any comma nodes (this is to canonicalize for value numbering). // If this is a source, it will morph it to an GT_IND before taking its address, // since it may not be remorphed (and we don't want blk nodes as rvalues). GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue) { GenTree* addr; GenTree* tree = *pTree; // If this is an indirection, we can return its op1, unless it's a GTF_IND_ARR_INDEX, in which case we // need to hang onto that for the purposes of value numbering. if (tree->OperIsIndir()) { if ((tree->gtFlags & GTF_IND_ARR_INDEX) == 0) { addr = tree->AsOp()->gtOp1; } else { if (isRValue && tree->OperIsBlk()) { tree->ChangeOper(GT_IND); } addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); } } else if (tree->gtOper == GT_COMMA) { // If this is a comma, we're going to "sink" the GT_ADDR below it. 
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue); tree->gtType = TYP_BYREF; addr = tree; } else { switch (tree->gtOper) { case GT_LCL_FLD: case GT_LCL_VAR: case GT_INDEX: case GT_FIELD: case GT_ARR_ELEM: addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree); break; case GT_INDEX_ADDR: addr = tree; break; default: { // TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're // not going to use "temp" GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd); unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum(); lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr)); addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue); break; } } } *pTree = addr; return addr; } //------------------------------------------------------------------------ // fgMorphBlockOperand: Canonicalize an operand of a block assignment // // Arguments: // tree - The block operand // asgType - The type of the assignment // blockWidth - The size of the block // isBlkReqd - true iff this operand must remain a block node // // Return Value: // Returns the morphed block operand // // Notes: // This does the following: // - Ensures that a struct operand is a block node or lclVar. // - Ensures that any COMMAs are above ADDR nodes. // Although 'tree' WAS an operand of a block assignment, the assignment // may have been retyped to be a scalar assignment. GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd) { GenTree* effectiveVal = tree->gtEffectiveVal(); if (asgType != TYP_STRUCT) { if (effectiveVal->OperIsIndir()) { if (!isBlkReqd) { GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType)) { effectiveVal = addr->gtGetOp1(); } else if (effectiveVal->OperIsBlk()) { effectiveVal->SetOper(GT_IND); } } effectiveVal->gtType = asgType; } else if (effectiveVal->TypeGet() != asgType) { if (effectiveVal->IsCall()) { #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } else { GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); effectiveVal = gtNewIndir(asgType, addr); } } } else { GenTreeIndir* indirTree = nullptr; GenTreeLclVarCommon* lclNode = nullptr; bool needsIndirection = true; if (effectiveVal->OperIsIndir()) { indirTree = effectiveVal->AsIndir(); GenTree* addr = effectiveVal->AsIndir()->Addr(); if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR)) { lclNode = addr->gtGetOp1()->AsLclVarCommon(); } } else if (effectiveVal->OperGet() == GT_LCL_VAR) { lclNode = effectiveVal->AsLclVarCommon(); } else if (effectiveVal->IsCall()) { needsIndirection = false; #ifdef DEBUG GenTreeCall* call = effectiveVal->AsCall(); assert(call->TypeGet() == TYP_STRUCT); assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd)); #endif } #ifdef TARGET_ARM64 else if (effectiveVal->OperIsHWIntrinsic()) { needsIndirection = false; #ifdef DEBUG GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic(); assert(intrinsic->TypeGet() == TYP_STRUCT); assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())); #endif } #endif // TARGET_ARM64 if (lclNode != nullptr) { const LclVarDsc* varDsc = lvaGetDesc(lclNode); if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType)) { if (effectiveVal != lclNode) { JITDUMP("Replacing block node [%06d] with lclVar 
V%02u\n", dspTreeID(tree), lclNode->GetLclNum()); effectiveVal = lclNode; } needsIndirection = false; } else { // This may be a lclVar that was determined to be address-exposed. effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT); } } if (needsIndirection) { if (indirTree != nullptr) { // If we have an indirection and a block is required, it should already be a block. assert(indirTree->OperIsBlk() || !isBlkReqd); effectiveVal->gtType = asgType; } else { GenTree* newTree; GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal); if (isBlkReqd) { CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal); if (clsHnd == NO_CLASS_HANDLE) { newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth)); } else { newTree = gtNewObjNode(clsHnd, addr); gtSetObjGcInfo(newTree->AsObj()); } } else { newTree = gtNewIndir(asgType, addr); } effectiveVal = newTree; } } } assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal))); tree = effectiveVal; return tree; } //------------------------------------------------------------------------ // fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields. // // Arguments: // lclNum1 - a promoted lclVar that is used in fieldwise assignment; // lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM. // // Return Value: // True if the second local is valid and has the same struct handle as the first, // false otherwise. // // Notes: // This check is needed to avoid accessing LCL_VARs with incorrect // CORINFO_FIELD_HANDLE that would confuse VN optimizations. // bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2) { assert(lclNum1 != BAD_VAR_NUM); if (lclNum2 == BAD_VAR_NUM) { return false; } const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1); const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2); assert(varTypeIsStruct(varDsc1)); if (!varTypeIsStruct(varDsc2)) { return false; } CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd(); CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd(); assert(struct1 != NO_CLASS_HANDLE); assert(struct2 != NO_CLASS_HANDLE); if (struct1 != struct2) { return false; } return true; } // insert conversions and normalize to make tree amenable to register // FP architectures GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree) { if (tree->OperIsArithmetic()) { if (varTypeIsFloating(tree)) { GenTree* op1 = tree->AsOp()->gtOp1; GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet())); if (op1->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet()); } if (op2->TypeGet() != tree->TypeGet()) { tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet()); } } } else if (tree->OperIsCompare()) { GenTree* op1 = tree->AsOp()->gtOp1; if (varTypeIsFloating(op1)) { GenTree* op2 = tree->gtGetOp2(); assert(varTypeIsFloating(op2)); if (op1->TypeGet() != op2->TypeGet()) { // both had better be floating, just one bigger than other if (op1->TypeGet() == TYP_FLOAT) { assert(op2->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_FLOAT) { assert(op1->TypeGet() == TYP_DOUBLE); tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } } } } return tree; } #ifdef FEATURE_SIMD 
//--------------------------------------------------------------------------------------------------------------
// getSIMDStructFromField:
//   Check whether the field belongs to a simd struct. If it does, return the GenTree* for
//   the struct node, along with the base type, field index and simd size. If it does not, return nullptr.
//   Usually, if the tree node is from a simd lclvar which is not used in any SIMD intrinsic, we should
//   return nullptr, since in this case we should treat the SIMD struct as a regular struct.
//   However, if the simd struct node is wanted regardless, ignoreUsedInSIMDIntrinsic can be set to true;
//   the IsUsedInSIMDIntrinsic check is then skipped, and the SIMD struct node is returned whenever
//   the struct is a SIMD struct.
//
// Arguments:
//   tree - GenTree*. This node will be checked to see whether it is a field which belongs to a simd
//          struct used for a simd intrinsic or not.
//   simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut
//                        to the simd lclvar's base JIT type.
//   indexOut - unsigned pointer, if the tree is used for a simd intrinsic, we will set *indexOut
//              equal to the index number of this field.
//   simdSizeOut - unsigned pointer, if the tree is used for a simd intrinsic, set the *simdSizeOut
//                 equal to the simd struct size which this tree belongs to.
//   ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
//                               the UsedInSIMDIntrinsic check.
//
// return value:
//   A GenTree* which points to the simd lclvar tree the field belongs to. If the tree is not a
//   simd-intrinsic-related field, return nullptr.
//
GenTree* Compiler::getSIMDStructFromField(GenTree*     tree,
                                          CorInfoType* simdBaseJitTypeOut,
                                          unsigned*    indexOut,
                                          unsigned*    simdSizeOut,
                                          bool         ignoreUsedInSIMDIntrinsic /*false*/)
{
    GenTree* ret = nullptr;
    if (tree->OperGet() == GT_FIELD)
    {
        GenTree* objRef = tree->AsField()->GetFldObj();
        if (objRef != nullptr)
        {
            GenTree* obj = nullptr;

            if (objRef->gtOper == GT_ADDR)
            {
                obj = objRef->AsOp()->gtOp1;
            }
            else if (ignoreUsedInSIMDIntrinsic)
            {
                obj = objRef;
            }
            else
            {
                return nullptr;
            }

            if (isSIMDTypeLocal(obj))
            {
                LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon());
                if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
                {
                    *simdSizeOut        = varDsc->lvExactSize;
                    *simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
                    ret                 = obj;
                }
            }
            else if (obj->OperGet() == GT_SIMD)
            {
                ret                       = obj;
                GenTreeSIMD* simdNode     = obj->AsSIMD();
                *simdSizeOut              = simdNode->GetSimdSize();
                *simdBaseJitTypeOut       = simdNode->GetSimdBaseJitType();
            }
#ifdef FEATURE_HW_INTRINSICS
            else if (obj->OperIsHWIntrinsic())
            {
                ret                          = obj;
                GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
                *simdSizeOut                 = simdNode->GetSimdSize();
                *simdBaseJitTypeOut          = simdNode->GetSimdBaseJitType();
            }
#endif // FEATURE_HW_INTRINSICS
        }
    }
    if (ret != nullptr)
    {
        var_types fieldType = tree->TypeGet();
        if (fieldType == TYP_LONG)
        {
            // Vector2/3/4 expose public float fields while Vector<T>
            // and Vector64/128/256<T> have internal ulong fields. So
            // we should only ever encounter accesses for TYP_FLOAT or
            // TYP_LONG and in the case of the latter we don't want the
            // generic type since we are executing some algorithm on the
            // raw underlying bits instead.
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG; } else { assert(fieldType == TYP_FLOAT); } unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut)); *indexOut = tree->AsField()->gtFldOffset / baseTypeSize; } return ret; } /***************************************************************************** * If a read operation tries to access simd struct field, then transform the operation * to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree) { unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); GenTree* op2 = gtNewIconNode(index, TYP_INT); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); } return tree; } /***************************************************************************** * Transform an assignment of a SIMD struct field to SimdWithElementNode, and * return a new tree. If it is not such an assignment, then return the old tree. * Argument: * tree - GenTree*. If this pointer points to simd struct which is used for simd * intrinsic, we will morph it as simd intrinsic set. * Return: * A GenTree* which points to the new tree. If the tree is not for simd intrinsic, * return nullptr. */ GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree) { assert(tree->OperGet() == GT_ASG); unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize); if (simdStructNode != nullptr) { var_types simdType = simdStructNode->gtType; var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); assert(simdSize <= 16); assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType))); GenTree* op2 = gtNewIconNode(index, TYP_INT); GenTree* op3 = tree->gtGetOp2(); NamedIntrinsic intrinsicId = NI_Vector128_WithElement; GenTree* target = gtClone(simdStructNode); assert(target != nullptr); GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize, /* isSimdAsHWIntrinsic */ true); tree->AsOp()->gtOp1 = target; tree->AsOp()->gtOp2 = simdTree; // fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source // and target have not yet been morphed. // Therefore, in case the source and/or target are now implicit byrefs, we need to call it again. if (fgMorphImplicitByRefArgs(tree)) { if (tree->gtGetOp1()->OperIsBlk()) { assert(tree->gtGetOp1()->TypeGet() == simdType); tree->gtGetOp1()->SetOper(GT_IND); tree->gtGetOp1()->gtType = simdType; } } #ifdef DEBUG tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } return tree; } #endif // FEATURE_SIMD //------------------------------------------------------------------------------ // fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3" // for commutative operators. 
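// (For example, an illustrative case: "(x + 2) + 3" folds to "x + 5".)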
// // Arguments: // tree - node to fold // // return value: // A folded GenTree* instance or nullptr if something prevents folding. // GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree) { assert(varTypeIsIntegralOrI(tree->TypeGet())); assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR)); // op1 can be GT_COMMA, in this case we're going to fold // "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))" GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true); genTreeOps oper = tree->OperGet(); if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() || op1->gtGetOp1()->IsCnsIntOrI()) { return nullptr; } if (!fgGlobalMorph && (op1 != tree->gtGetOp1())) { // Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1))) // don't run the optimization for such trees outside of global morph. // Otherwise, there is a chance of violating VNs invariants and/or modifying a tree // that is an active CSE candidate. return nullptr; } if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1)) { // The optimization removes 'tree' from IR and changes the value of 'op1'. return nullptr; } if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow())) { return nullptr; } GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon(); GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon(); if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet())) { return nullptr; } if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2)) { // The optimization removes 'cns2' from IR and changes the value of 'cns1'. return nullptr; } GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2)); if (!folded->IsCnsIntOrI()) { // Give up if we can't fold "C1 op C2" return nullptr; } auto foldedCns = folded->AsIntCon(); cns1->SetIconValue(foldedCns->IconValue()); cns1->SetVNsFromNode(foldedCns); cns1->gtFieldSeq = foldedCns->gtFieldSeq; op1 = tree->gtGetOp1(); op1->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(cns2); DEBUG_DESTROY_NODE(foldedCns); INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return op1->AsOp(); } //------------------------------------------------------------------------------ // fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)". // // Arguments: // tree - node to fold // // Return Value: // A folded GenTree* instance, or nullptr if it couldn't be folded GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree) { // This transform does not preserve VNs and deletes a node. assert(fgGlobalMorph); assert(varTypeIsIntegralOrI(tree)); assert(tree->OperIs(GT_OR, GT_AND, GT_XOR)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); genTreeOps oper = tree->OperGet(); // see whether both ops are casts, with matching to and from types. if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST)) { // bail if either operand is a checked cast if (op1->gtOverflow() || op2->gtOverflow()) { return nullptr; } var_types fromType = op1->AsCast()->CastOp()->TypeGet(); var_types toType = op1->AsCast()->CastToType(); bool isUnsigned = op1->IsUnsigned(); if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) || (op2->IsUnsigned() != isUnsigned)) { return nullptr; } /* // Reuse gentree nodes: // // tree op1 // / \ | // op1 op2 ==> tree // | | / \. 
        //    x     y         x     y
        //
        // (op2 becomes garbage)
        */
        tree->gtOp1  = op1->AsCast()->CastOp();
        tree->gtOp2  = op2->AsCast()->CastOp();
        tree->gtType = genActualType(fromType);

        op1->gtType                 = genActualType(toType);
        op1->AsCast()->gtOp1        = tree;
        op1->AsCast()->CastToType() = toType;
        op1->SetAllEffectsFlags(tree);
        // no need to update isUnsigned

        DEBUG_DESTROY_NODE(op2);
        INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);

        return op1;
    }

    return nullptr;
}

/*****************************************************************************
 *
 *  Transform the given GTK_SMPOP tree for code generation.
 */

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
{
    ALLOCA_CHECK();
    assert(tree->OperKind() & GTK_SMPOP);

    /* The steps in this function are:
       o Perform required preorder processing
       o Process the first, then second operand, if any
       o Perform required postorder morphing
       o Perform optional postorder morphing if optimizing
     */

    bool isQmarkColon = false;

    AssertionIndex origAssertionCount = DUMMY_INIT(0);
    AssertionDsc*  origAssertionTab   = DUMMY_INIT(NULL);

    AssertionIndex thenAssertionCount = DUMMY_INIT(0);
    AssertionDsc*  thenAssertionTab   = DUMMY_INIT(NULL);

    if (fgGlobalMorph)
    {
        tree = fgMorphForRegisterFP(tree);
    }

    genTreeOps oper = tree->OperGet();
    var_types  typ  = tree->TypeGet();
    GenTree*   op1  = tree->AsOp()->gtOp1;
    GenTree*   op2  = tree->gtGetOp2IfPresent();

    /*-------------------------------------------------------------------------
     * First do any PRE-ORDER processing
     */

    switch (oper)
    {
        // Some arithmetic operators need to use a helper call to the EE
        int helper;

        case GT_ASG:
            tree = fgDoNormalizeOnStore(tree);
            /* fgDoNormalizeOnStore can change op2 */
            noway_assert(op1 == tree->AsOp()->gtOp1);
            op2 = tree->AsOp()->gtOp2;

#ifdef FEATURE_SIMD
            if (IsBaselineSimdIsaSupported())
            {
                // We should check whether op2 should be assigned to a SIMD field or not.
                // If it is, we should translate the tree to a simd intrinsic.
                assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
                GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree);
                typ              = tree->TypeGet();
                op1              = tree->gtGetOp1();
                op2              = tree->gtGetOp2();
#ifdef DEBUG
                assert((tree == newTree) && (tree->OperGet() == oper));
                if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
                {
                    tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
                }
#endif // DEBUG
            }
#endif

            // We can't CSE the LHS of an assignment. Only r-values can be CSEed.
            // Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
            // behavior, allow CSE'ing if it is a struct type (or a TYP_REF transformed from a struct type)
            // TODO-1stClassStructs: improve this.
            if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
            {
                op1->gtFlags |= GTF_DONT_CSE;
            }
            break;

        case GT_ADDR:
            /* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
            op1->gtFlags |= GTF_DONT_CSE;
            break;

        case GT_QMARK:
        case GT_JTRUE:

            noway_assert(op1);

            if (op1->OperIsCompare())
            {
                /* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
                   not need to materialize the result as a 0 or 1. */

                /* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
                op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);

                // Request that the codegen for op1 sets the condition flags
                // when it generates the code for op1.
                //
                // Codegen for op1 must set the condition flags if
                // this method returns true.
// op1->gtRequestSetFlags(); } else { GenTree* effOp1 = op1->gtEffectiveVal(); noway_assert((effOp1->gtOper == GT_CNS_INT) && (effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1))); } break; case GT_COLON: if (optLocalAssertionProp) { isQmarkColon = true; } break; case GT_FIELD: return fgMorphField(tree, mac); case GT_INDEX: return fgMorphArrayIndex(tree); case GT_CAST: { GenTree* morphedCast = fgMorphExpandCast(tree->AsCast()); if (morphedCast != nullptr) { return morphedCast; } op1 = tree->AsCast()->CastOp(); } break; case GT_MUL: noway_assert(op2 != nullptr); if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow()) { // MUL(NEG(a), C) => MUL(a, NEG(C)) if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() && !op2->IsIconHandle()) { GenTree* newOp1 = op1->gtGetOp1(); GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet()); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(op2); tree->AsOp()->gtOp1 = newOp1; tree->AsOp()->gtOp2 = newConst; return fgMorphSmpOp(tree, mac); } } #ifndef TARGET_64BIT if (typ == TYP_LONG) { // For (long)int1 * (long)int2, we dont actually do the // casts, and just multiply the 32 bit values, which will // give us the 64 bit result in edx:eax. if (tree->Is64RsltMul()) { // We are seeing this node again. // Morph only the children of casts, // so as to avoid losing them. tree = fgMorphLongMul(tree->AsOp()); goto DONE_MORPHING_CHILDREN; } tree = fgRecognizeAndMorphLongMul(tree->AsOp()); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->AsOp()->gtGetOp2(); if (tree->Is64RsltMul()) { goto DONE_MORPHING_CHILDREN; } else { if (tree->gtOverflow()) helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF; else helper = CORINFO_HELP_LMUL; goto USE_HELPER_FOR_ARITH; } } #endif // !TARGET_64BIT break; case GT_ARR_LENGTH: if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return iconNode; } } break; case GT_DIV: // Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two. 
            // Powers of two within range are always exactly represented,
            // so multiplication by the reciprocal is safe in this scenario
            if (fgGlobalMorph && op2->IsCnsFltOrDbl())
            {
                double divisor = op2->AsDblCon()->gtDconVal;
                if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
                    ((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
                {
                    oper = GT_MUL;
                    tree->ChangeOper(oper);
                    op2->AsDblCon()->gtDconVal = 1.0 / divisor;
                }
            }

            // Convert DIV to UDIV if both op1 and op2 are known to be never negative
            if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                op2->IsNeverNegative(this))
            {
                assert(tree->OperIs(GT_DIV));
                tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
                return fgMorphSmpOp(tree, mac);
            }

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = CORINFO_HELP_LDIV;
                goto USE_HELPER_FOR_ARITH;
            }

#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                helper = CORINFO_HELP_DIV;
                goto USE_HELPER_FOR_ARITH;
            }
#endif
#endif // !TARGET_64BIT
            break;

        case GT_UDIV:

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = CORINFO_HELP_ULDIV;
                goto USE_HELPER_FOR_ARITH;
            }
#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                helper = CORINFO_HELP_UDIV;
                goto USE_HELPER_FOR_ARITH;
            }
#endif
#endif // TARGET_64BIT
            break;

        case GT_MOD:

            if (varTypeIsFloating(typ))
            {
                helper = CORINFO_HELP_DBLREM;
                noway_assert(op2);
                if (op1->TypeGet() == TYP_FLOAT)
                {
                    if (op2->TypeGet() == TYP_FLOAT)
                    {
                        helper = CORINFO_HELP_FLTREM;
                    }
                    else
                    {
                        tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
                    }
                }
                else if (op2->TypeGet() == TYP_FLOAT)
                {
                    tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
                }
                goto USE_HELPER_FOR_ARITH;
            }

            // Convert MOD to UMOD if both op1 and op2 are known to be never negative
            if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
                op2->IsNeverNegative(this))
            {
                assert(tree->OperIs(GT_MOD));
                tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
                return fgMorphSmpOp(tree, mac);
            }

            // Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
            // A similar optimization for signed mod will not work for a negative perfectly divisible
            // HI-word. To make it correct, we would need to divide without the sign and then flip the
            // result sign after mod. This requires 18 opcodes + flow, making it not worthwhile to inline.
            goto ASSIGN_HELPER_FOR_MOD;

        case GT_UMOD:

#ifdef TARGET_ARMARCH
//
// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
//
#else  // TARGET_XARCH
            // If this is an unsigned long mod with a constant divisor,
            // then don't morph to a helper call - it can be done faster inline using idiv.

            noway_assert(op2);
            if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
            {
                if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
                    op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
                {
                    tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
                    noway_assert(op1->TypeIs(TYP_LONG));

                    // Update flags for op1 morph.
                    tree->gtFlags &= ~GTF_ALL_EFFECT;

                    // Only update with op1 as op2 is a constant.
                    tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);

                    // If op1 is a constant, then do constant folding of the division operator.
                    if (op1->OperIs(GT_CNS_NATIVELONG))
                    {
                        tree = gtFoldExpr(tree);
                    }

                    if (!tree->OperIsConst())
                    {
                        tree->AsOp()->CheckDivideByConstOptimized(this);
                    }

                    return tree;
                }
            }
#endif // TARGET_XARCH

        ASSIGN_HELPER_FOR_MOD:

            // For "val % 1", return 0 if op1 doesn't have any side effects
            // and we are not in the CSE phase; in the CSE phase we cannot discard 'tree'
            // because it may contain CSE expressions that we haven't yet examined.
            //
            if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
            {
                if (op2->IsIntegralConst(1))
                {
                    GenTree* zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
                    zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
                    DEBUG_DESTROY_NODE(tree);
                    return zeroNode;
                }
            }

#ifndef TARGET_64BIT
            if (typ == TYP_LONG)
            {
                helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
                goto USE_HELPER_FOR_ARITH;
            }

#if USE_HELPERS_FOR_INT_DIV
            if (typ == TYP_INT)
            {
                if (oper == GT_UMOD)
                {
                    helper = CORINFO_HELP_UMOD;
                    goto USE_HELPER_FOR_ARITH;
                }
                else if (oper == GT_MOD)
                {
                    helper = CORINFO_HELP_MOD;
                    goto USE_HELPER_FOR_ARITH;
                }
            }
#endif
#endif // !TARGET_64BIT

#ifdef TARGET_ARM64
            // For ARM64 we don't have a remainder instruction,
            // The architecture manual suggests the following transformation to
            // generate code for such operator:
            //
            // a % b = a - (a / b) * b;
            //
            // TODO: there are special cases where it can be done better, for example
            // when the modulo operation is unsigned and the divisor is an
            // integer constant power of two. In this case, we can make the transform:
            //
            // a % b = a & (b - 1);
            //
            // Lower supports it for all cases except when `a` is constant, but
            // in Morph we can't guarantee that `a` won't be transformed into a constant,
            // so can't guarantee that lower will be able to do this optimization.
            {
                // Do "a % b = a - (a / b) * b" morph always, see TODO before this block.

                bool doMorphModToSubMulDiv = true;

                if (doMorphModToSubMulDiv)
                {
                    assert(!optValnumCSE_phase);

                    tree = fgMorphModToSubMulDiv(tree->AsOp());
                    op1  = tree->AsOp()->gtOp1;
                    op2  = tree->AsOp()->gtOp2;
                }
            }
#else  // !TARGET_ARM64
            // If b is not a power of 2 constant then lowering replaces a % b
            // with a - (a / b) * b and applies magic division optimization to
            // a / b. The code may already contain an a / b expression (e.g.
            // x = a / 10; y = a % 10;) and then we end up with redundant code.
            // If we convert % to / here we give CSE the opportunity to eliminate
            // the redundant division. If there's no redundant division then
            // nothing is lost, lowering would have done this transform anyway.

            if (!optValnumCSE_phase && ((tree->OperGet() == GT_MOD) && op2->IsIntegralConst()))
            {
                ssize_t divisorValue    = op2->AsIntCon()->IconValue();
                size_t  absDivisorValue = (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue)
                                                                        : static_cast<size_t>(abs(divisorValue));

                if (!isPow2(absDivisorValue))
                {
                    tree = fgMorphModToSubMulDiv(tree->AsOp());
                    op1  = tree->AsOp()->gtOp1;
                    op2  = tree->AsOp()->gtOp2;
                }
            }
#endif // !TARGET_ARM64
            break;

        USE_HELPER_FOR_ARITH:
        {
            // TODO: this comment is wrong now, do an appropriate fix.
            /* We have to morph these arithmetic operations into helper calls
               before morphing the arguments (preorder), else the arguments
               won't get correct values of fgPtrArgCntCur.
               However, try to fold the tree first in case we end up with a
               simple node which won't need a helper call at all */

            noway_assert(tree->OperIsBinary());

            GenTree* oldTree = tree;

            tree = gtFoldExpr(tree);

            // Were we able to fold it?
            // Note that gtFoldExpr may return a non-leaf even if successful
            // e.g. for something like "expr / 1" - see also bug #290853
            if (tree->OperIsLeaf() || (oldTree != tree))
            {
                return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
            }

            // Did we fold it into a comma node with throw?
            if (tree->gtOper == GT_COMMA)
            {
                noway_assert(fgIsCommaThrow(tree));
                return fgMorphTree(tree);
            }
        }
            return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2));

        case GT_RETURN:
            if (!tree->TypeIs(TYP_VOID))
            {
                if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND))
                {
                    op1 = fgMorphRetInd(tree->AsUnOp());
                }
                if (op1->OperIs(GT_LCL_VAR))
                {
                    // With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)`
                    // and `ASG` will be transformed into field by field copy without parent local referencing if
                    // possible.
                    GenTreeLclVar* lclVar = op1->AsLclVar();
                    unsigned       lclNum = lclVar->GetLclNum();
                    if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum))
                    {
                        LclVarDsc* varDsc = lvaGetDesc(lclVar);
                        if (varDsc->CanBeReplacedWithItsField(this))
                        {
                            // We can replace the struct with its only field and allow copy propagation to replace
                            // return value that was written as a field.
                            unsigned   fieldLclNum = varDsc->lvFieldLclStart;
                            LclVarDsc* fieldDsc    = lvaGetDesc(fieldLclNum);

                            JITDUMP("Replacing an independently promoted local var V%02u with its only field "
                                    "V%02u for "
                                    "the return [%06u]\n",
                                    lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree));
                            lclVar->SetLclNum(fieldLclNum);
                            lclVar->ChangeType(fieldDsc->lvType);
                        }
                    }
                }
            }

            // normalize small integer return values
            if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) &&
                fgCastNeeded(op1, info.compRetType))
            {
                // Small-typed return values are normalized by the callee
                op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType);

                // Propagate GTF_COLON_COND
                op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);

                tree->AsOp()->gtOp1 = fgMorphTree(op1);

                // Propagate side effect flags
                tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1());

                return tree;
            }
            break;

        case GT_EQ:
        case GT_NE:
        {
            GenTree* optimizedTree = gtFoldTypeCompare(tree);

            if (optimizedTree != tree)
            {
                return fgMorphTree(optimizedTree);
            }

            // Pattern-matching optimization:
            //    (a % c) ==/!= 0
            // for power-of-2 constant `c`
            // =>
            //    a & (c - 1) ==/!= 0
            // For integer `a`, even if negative.
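            // For example (illustrative): "(a % 8) == 0" becomes "(a & 7) == 0",
            // which is correct in two's complement even when `a` is negative.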
if (opts.OptimizationEnabled() && !optValnumCSE_phase) { assert(tree->OperIs(GT_EQ, GT_NE)); if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0)) { GenTree* op1op2 = op1->AsOp()->gtOp2; if (op1op2->IsCnsIntOrI()) { const ssize_t modValue = op1op2->AsIntCon()->IconValue(); if (isPow2(modValue)) { JITDUMP("\nTransforming:\n"); DISPTREE(tree); op1->SetOper(GT_AND); // Change % => & op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1 fgUpdateConstTreeValueNumber(op1op2); JITDUMP("\ninto:\n"); DISPTREE(tree); } } } } } FALLTHROUGH; case GT_GT: { // Try and optimize nullable boxes feeding compares GenTree* optimizedTree = gtFoldBoxNullable(tree); if (optimizedTree->OperGet() != tree->OperGet()) { return optimizedTree; } else { tree = optimizedTree; } op1 = tree->AsOp()->gtOp1; op2 = tree->gtGetOp2IfPresent(); break; } case GT_RUNTIMELOOKUP: return fgMorphTree(op1); #ifdef TARGET_ARM case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round) { switch (tree->TypeGet()) { case TYP_DOUBLE: return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1)); case TYP_FLOAT: return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1)); default: unreached(); } } break; #endif case GT_PUTARG_TYPE: return fgMorphTree(tree->AsUnOp()->gtGetOp1()); case GT_NULLCHECK: { op1 = tree->AsUnOp()->gtGetOp1(); if (op1->IsCall()) { GenTreeCall* const call = op1->AsCall(); if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd))) { JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call)); // TODO: Can we also remove the call? // return fgMorphTree(call); } } } break; default: break; } if (opts.OptimizationEnabled() && fgGlobalMorph) { GenTree* morphed = fgMorphReduceAddOps(tree); if (morphed != tree) return fgMorphTree(morphed); } /*------------------------------------------------------------------------- * Process the first operand, if any */ if (op1) { // If we are entering the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can restore this state when entering the "else" part if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); origAssertionTab = (AssertionDsc*)ALLOCA(tabSize); origAssertionCount = optAssertionCount; memcpy(origAssertionTab, optAssertionTabPrivate, tabSize); } else { origAssertionCount = 0; origAssertionTab = nullptr; } } // We might need a new MorphAddressContext context. (These are used to convey // parent context about how addresses being calculated will be used; see the // specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. MorphAddrContext subIndMac1(MACK_Ind); MorphAddrContext* subMac1 = mac; if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind) { switch (tree->gtOper) { case GT_ADDR: // A non-null mac here implies this node is part of an address computation. // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; subMac1->m_kind = MACK_Addr; } break; case GT_COMMA: // In a comma, the incoming context only applies to the rightmost arg of the // comma list. The left arg (op1) gets a fresh context. 
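                    // For example (illustrative): in IND(COMMA(sideEffect, addrExpr)), only
                    // addrExpr feeds the address computation and inherits the IND context.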
subMac1 = nullptr; break; case GT_OBJ: case GT_BLK: case GT_IND: // A non-null mac here implies this node is part of an address computation (the tree parent is // GT_ADDR). // If so, we need to pass the existing mac down to the child node. // // Otherwise, use a new mac. if (subMac1 == nullptr) { subMac1 = &subIndMac1; } break; default: break; } } // For additions, if we're in an IND context keep track of whether // all offsets added to the address are constant, and their sum. if (tree->gtOper == GT_ADD && subMac1 != nullptr) { assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock. GenTree* otherOp = tree->AsOp()->gtOp2; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset); totalOffset += otherOp->AsIntConCommon()->IconValue(); if (totalOffset.IsOverflow()) { // We will consider an offset so large as to overflow as "not a constant" -- // we will do a null check. subMac1->m_allConstantOffsets = false; } else { subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } } else { subMac1->m_allConstantOffsets = false; } } // If op1 is a GT_FIELD or indir, we need to pass down the mac if // its parent is GT_ADDR, since the address of op1 // is part of an ongoing address computation. Otherwise // op1 represents the value of the field and so any address // calculations it does are in a new context. if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR)) { subMac1 = nullptr; // The impact of op1's value to any ongoing // address computation is handled below when looking // at op2. } tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1); // If we are exiting the "then" part of a Qmark-Colon we must // save the state of the current copy assignment table // so that we can merge this state with the "else" part exit if (isQmarkColon) { noway_assert(optLocalAssertionProp); if (optAssertionCount) { noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea unsigned tabSize = optAssertionCount * sizeof(AssertionDsc); thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize); thenAssertionCount = optAssertionCount; memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize); } else { thenAssertionCount = 0; thenAssertionTab = nullptr; } } /* Morphing along with folding and inlining may have changed the * side effect flags, so we have to reset them * * NOTE: Don't reset the exception flags on nodes that may throw */ assert(tree->gtOper != GT_CALL); if (!tree->OperRequiresCallFlag(this)) { tree->gtFlags &= ~GTF_CALL; } /* Propagate the new flags */ tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); // &aliasedVar doesn't need GTF_GLOB_REF, though alisasedVar does // Similarly for clsVar if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR)) { tree->gtFlags &= ~GTF_GLOB_REF; } } // if (op1) /*------------------------------------------------------------------------- * Process the second operand, if any */ if (op2) { // If we are entering the "else" part of a Qmark-Colon we must // reset the state of the current copy assignment table if (isQmarkColon) { noway_assert(optLocalAssertionProp); optAssertionReset(0); if (origAssertionCount) { size_t tabSize = origAssertionCount * sizeof(AssertionDsc); memcpy(optAssertionTabPrivate, origAssertionTab, tabSize); optAssertionReset(origAssertionCount); } } // We might need a new MorphAddressContext context to use in evaluating op2. 
// (These are used to convey parent context about how addresses being calculated // will be used; see the specification comment for MorphAddrContext for full details.) // Assume it's an Ind context to start. switch (tree->gtOper) { case GT_ADD: if (mac != nullptr && mac->m_kind == MACK_Ind) { GenTree* otherOp = tree->AsOp()->gtOp1; // Is the other operator a constant? if (otherOp->IsCnsIntOrI()) { mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue(); } else { mac->m_allConstantOffsets = false; } } break; default: break; } // If op2 is a GT_FIELD or indir, we must be taking its value, // so it should evaluate its address in a new context. if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir()) { // The impact of op2's value to any ongoing // address computation is handled above when looking // at op1. mac = nullptr; } tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac); /* Propagate the side effect flags from op2 */ tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT); // If we are exiting the "else" part of a Qmark-Colon we must // merge the state of the current copy assignment table with // that of the exit of the "then" part. if (isQmarkColon) { noway_assert(optLocalAssertionProp); // If either exit table has zero entries then // the merged table also has zero entries if (optAssertionCount == 0 || thenAssertionCount == 0) { optAssertionReset(0); } else { size_t tabSize = optAssertionCount * sizeof(AssertionDsc); if ((optAssertionCount != thenAssertionCount) || (memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0)) { // Yes they are different so we have to find the merged set // Iterate over the copy asgn table removing any entries // that do not have an exact match in the thenAssertionTab AssertionIndex index = 1; while (index <= optAssertionCount) { AssertionDsc* curAssertion = optGetAssertion(index); for (unsigned j = 0; j < thenAssertionCount; j++) { AssertionDsc* thenAssertion = &thenAssertionTab[j]; // Do the left sides match? if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) && (curAssertion->assertionKind == thenAssertion->assertionKind)) { // Do the right sides match? 
                        if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
                            (curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
                        {
                            goto KEEP;
                        }
                        else
                        {
                            goto REMOVE;
                        }
                    }
                }
                //
                // If we fall out of the loop above then we didn't find
                // any matching entry in the thenAssertionTab so it must
                // have been killed on that path so we remove it here
                //
            REMOVE:
                // The data at optAssertionTabPrivate[i] is to be removed
                CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
                if (verbose)
                {
                    printf("The QMARK-COLON ");
                    printTreeID(tree);
                    printf(" removes assertion candidate #%d\n", index);
                }
#endif
                optAssertionRemove(index);
                continue;
            KEEP:
                // The data at optAssertionTabPrivate[i] is to be kept
                index++;
            }
            }
            }
        }
    } // if (op2)

#ifndef TARGET_64BIT
DONE_MORPHING_CHILDREN:
#endif // !TARGET_64BIT

    if (tree->OperIsIndirOrArrLength())
    {
        tree->SetIndirExceptionFlags(this);
    }
    else
    {
        if (tree->OperMayThrow(this))
        {
            // Mark the tree node as potentially throwing an exception
            tree->gtFlags |= GTF_EXCEPT;
        }
        else
        {
            if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
                ((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
            {
                tree->gtFlags &= ~GTF_EXCEPT;
            }
        }
    }

    if (tree->OperRequiresAsgFlag())
    {
        tree->gtFlags |= GTF_ASG;
    }
    else
    {
        if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
            ((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
        {
            tree->gtFlags &= ~GTF_ASG;
        }
    }

    if (tree->OperRequiresCallFlag(this))
    {
        tree->gtFlags |= GTF_CALL;
    }
    else
    {
        if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) &&
            ((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0)))
        {
            tree->gtFlags &= ~GTF_CALL;
        }
    }

    /*-------------------------------------------------------------------------
     * Now do POST-ORDER processing
     */

    if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet())))
    {
        // The tree is really not GC but was marked as such. Now that the
        // children have been unmarked, unmark the tree too.

        // Remember that GT_COMMA inherits its type only from op2
        if (tree->gtOper == GT_COMMA)
        {
            tree->gtType = genActualType(op2->TypeGet());
        }
        else
        {
            tree->gtType = genActualType(op1->TypeGet());
        }
    }

    GenTree* oldTree = tree;

    GenTree* qmarkOp1 = nullptr;
    GenTree* qmarkOp2 = nullptr;

    if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON))
    {
        qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1;
        qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2;
    }

    // Try to fold it, maybe we get lucky.
    tree = gtFoldExpr(tree);

    if (oldTree != tree)
    {
        /* if gtFoldExpr returned op1 or op2 then we are done */
        if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
        {
            return tree;
        }

        /* If we created a comma-throw tree then we need to morph op1 */
        if (fgIsCommaThrow(tree))
        {
            tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1);
            fgMorphTreeDone(tree);
            return tree;
        }

        return tree;
    }
    else if (tree->OperIsConst())
    {
        return tree;
    }

    /* gtFoldExpr could have used setOper to change the oper */
    oper = tree->OperGet();
    typ  = tree->TypeGet();

    /* gtFoldExpr could have changed op1 and op2 */
    op1 = tree->AsOp()->gtOp1;
    op2 = tree->gtGetOp2IfPresent();

    // Do we have an integer compare operation?
    //
    if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
    {
        // Are we comparing against zero?
        //
        if (op2->IsIntegralConst(0))
        {
            // Request that the codegen for op1 sets the condition flags
            // when it generates the code for op1.
            //
            // Codegen for op1 must set the condition flags if
            // this method returns true.
// op1->gtRequestSetFlags(); } } /*------------------------------------------------------------------------- * Perform the required oper-specific postorder morphing */ GenTree* temp; size_t ival1; GenTree* lclVarTree; GenTree* effectiveOp1; FieldSeqNode* fieldSeq = nullptr; switch (oper) { case GT_ASG: if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0)) { op1->gtFlags &= ~GTF_VAR_FOLDED_IND; tree = fgDoNormalizeOnStore(tree); op2 = tree->gtGetOp2(); } lclVarTree = fgIsIndirOfAddrOfLocal(op1); if (lclVarTree != nullptr) { lclVarTree->gtFlags |= GTF_VAR_DEF; } effectiveOp1 = op1->gtEffectiveVal(); // If we are storing a small type, we might be able to omit a cast. if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1)) { if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) && varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow()) { var_types castType = op2->CastToType(); // If we are performing a narrowing cast and // castType is larger or the same as op1's type // then we can discard the cast. if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1))) { tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp(); } } } fgAssignSetVarDef(tree); /* We can't CSE the LHS of an assignment */ /* We also must set in the pre-morphing phase, otherwise assertionProp doesn't see it */ if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT)) { op1->gtFlags |= GTF_DONT_CSE; } break; case GT_CAST: tree = fgOptimizeCast(tree->AsCast()); if (!tree->OperIsSimple()) { return tree; } if (tree->OperIs(GT_CAST) && tree->gtOverflow()) { fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW); } typ = tree->TypeGet(); oper = tree->OperGet(); op1 = tree->AsOp()->gtGetOp1(); op2 = tree->gtGetOp2IfPresent(); break; case GT_EQ: case GT_NE: // It is not safe to reorder/delete CSE's if (!optValnumCSE_phase && op2->IsIntegralConst()) { tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp()); assert(tree->OperIsCompare()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } goto COMPARE; case GT_LT: case GT_LE: case GT_GE: case GT_GT: if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST))) { tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp()); oper = tree->OperGet(); op1 = tree->gtGetOp1(); op2 = tree->gtGetOp2(); } // op2's value may be changed, so it cannot be a CSE candidate. if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2)) { tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp()); oper = tree->OperGet(); assert(op1 == tree->AsOp()->gtGetOp1()); assert(op2 == tree->AsOp()->gtGetOp2()); } COMPARE: noway_assert(tree->OperIsCompare()); break; case GT_MUL: #ifndef TARGET_64BIT if (typ == TYP_LONG) { // This must be GTF_MUL_64RSLT INDEBUG(tree->AsOp()->DebugCheckLongMul()); return tree; } #endif // TARGET_64BIT goto CM_OVF_OP; case GT_SUB: if (tree->gtOverflow()) { goto CM_OVF_OP; } // TODO #4104: there are a lot of other places where // this condition is not checked before transformations. if (fgGlobalMorph) { /* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */ noway_assert(op2); if (op2->IsCnsIntOrI() && !op2->IsIconHandle()) { // Negate the constant and change the node to be "+", // except when `op2` is a const byref. 
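                    // For example (illustrative): "x - 5" becomes "x + (-5)".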
                    op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
                    op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField();
                    oper                          = GT_ADD;
                    tree->ChangeOper(oper);
                    goto CM_ADD_OP;
                }

                /* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */

                noway_assert(op1);
                if (op1->IsCnsIntOrI())
                {
                    noway_assert(varTypeIsIntOrI(tree));

                    // The type of the new GT_NEG node cannot just be op2->TypeGet().
                    // Otherwise we may sign-extend incorrectly in cases where the GT_NEG
                    // node ends up feeding directly into a cast, for example in
                    // GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte))
                    tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2);
                    fgMorphTreeDone(op2);

                    oper = GT_ADD;
                    tree->ChangeOper(oper);
                    goto CM_ADD_OP;
                }

                /* No match - exit */
            }

            // Skip optimization if non-NEG operand is constant.
            // Both op1 and op2 are not constant because it was already checked above.
            if (opts.OptimizationEnabled() && fgGlobalMorph)
            {
                // a - -b => a + b
                // SUB(a, NEG(b)) => ADD(a, b)
                if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
                {
                    // tree: SUB
                    // op1: a
                    // op2: NEG
                    // op2Child: b
                    GenTree* op2Child = op2->AsOp()->gtOp1; // b
                    oper              = GT_ADD;
                    tree->SetOper(oper, GenTree::PRESERVE_VN);
                    tree->AsOp()->gtOp2 = op2Child;

                    DEBUG_DESTROY_NODE(op2);

                    op2 = op2Child;
                }
                // -a - -b => b - a
                // SUB(NEG(a), NEG(b)) => SUB(b, a)
                else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2))
                {
                    // tree: SUB
                    // op1: NEG
                    // op1Child: a
                    // op2: NEG
                    // op2Child: b
                    GenTree* op1Child   = op1->AsOp()->gtOp1; // a
                    GenTree* op2Child   = op2->AsOp()->gtOp1; // b
                    tree->AsOp()->gtOp1 = op2Child;
                    tree->AsOp()->gtOp2 = op1Child;

                    DEBUG_DESTROY_NODE(op1);
                    DEBUG_DESTROY_NODE(op2);

                    op1 = op2Child;
                    op2 = op1Child;
                }
            }

            break;

#ifdef TARGET_ARM64
        case GT_DIV:
            if (!varTypeIsFloating(tree->gtType))
            {
                // Codegen for this instruction needs to be able to throw two exceptions:
                fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
                fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
            }
            break;
        case GT_UDIV:
            // Codegen for this instruction needs to be able to throw one exception:
            fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
            break;
#endif

        case GT_ADD:

        CM_OVF_OP:
            if (tree->gtOverflow())
            {
                tree->gtRequestSetFlags();

                // Add the exception-throwing basic block to jump to on overflow
                fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);

                // We can't do any commutative morphing for overflow instructions
                break;
            }

        CM_ADD_OP:

            FALLTHROUGH;

        case GT_OR:
        case GT_XOR:
        case GT_AND:
            tree = fgOptimizeCommutativeArithmetic(tree->AsOp());
            if (!tree->OperIsSimple())
            {
                return tree;
            }
            typ  = tree->TypeGet();
            oper = tree->OperGet();
            op1  = tree->gtGetOp1();
            op2  = tree->gtGetOp2IfPresent();
            break;

        case GT_NOT:
        case GT_NEG:
            // Remove double negation/not.
            // Note: this is not a safe transformation if "tree" is a CSE candidate.
            // Consider for example the following expression: NEG(NEG(OP)), where any
            // NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find
            // the original NEG in the statement.
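            // For example (illustrative): NEG(NEG(x)) => x, and NOT(NOT(x)) => x.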
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) && !gtIsActiveCSE_Candidate(op1)) { JITDUMP("Remove double negation/not\n") GenTree* op1op1 = op1->gtGetOp1(); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op1op1; } // Distribute negation over simple multiplication/division expressions if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) && op1->OperIs(GT_MUL, GT_DIV)) { GenTreeOp* mulOrDiv = op1->AsOp(); GenTree* op1op1 = mulOrDiv->gtGetOp1(); GenTree* op1op2 = mulOrDiv->gtGetOp2(); if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle()) { // NEG(MUL(a, C)) => MUL(a, -C) // NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1} ssize_t constVal = op1op2->AsIntCon()->IconValue(); if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) || (mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow())) { GenTree* newOp1 = op1op1; // a GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C mulOrDiv->gtOp1 = newOp1; mulOrDiv->gtOp2 = newOp2; mulOrDiv->SetVNsFromNode(tree); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1op2); return mulOrDiv; } } } /* Any constant cases should have been folded earlier */ noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase); break; case GT_CKFINITE: noway_assert(varTypeIsFloating(op1->TypeGet())); fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN); break; case GT_BOUNDS_CHECK: fgSetRngChkTarget(tree); break; case GT_OBJ: case GT_BLK: case GT_IND: { // If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on // the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X // is a local or CLS_VAR, even if it has been address-exposed. if (op1->OperIs(GT_ADDR)) { tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF); } if (!tree->OperIs(GT_IND)) { break; } // Can not remove a GT_IND if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } bool foldAndReturnTemp = false; temp = nullptr; ival1 = 0; // Don't remove a volatile GT_IND, even if the address points to a local variable. if ((tree->gtFlags & GTF_IND_VOLATILE) == 0) { /* Try to Fold *(&X) into X */ if (op1->gtOper == GT_ADDR) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } temp = op1->AsOp()->gtOp1; // X // In the test below, if they're both TYP_STRUCT, this of course does *not* mean that // they are the *same* struct type. In fact, they almost certainly aren't. If the // address has an associated field sequence, that identifies this case; go through // the "lcl_fld" path rather than this one. FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below. 
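                    // Illustrative sketch (hypothetical local): "*(&someLocal)" where the
                    // IND's type matches someLocal's type takes the foldAndReturnTemp path
                    // below and is replaced by a direct use of the LCL_VAR.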
                    if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
                    {
                        foldAndReturnTemp = true;
                    }
                    else if (temp->OperIsLocal())
                    {
                        unsigned   lclNum = temp->AsLclVarCommon()->GetLclNum();
                        LclVarDsc* varDsc = lvaGetDesc(lclNum);

                        // We will try to optimize when we have a promoted struct with a zero lvFldOffset
                        if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
                        {
                            noway_assert(varTypeIsStruct(varDsc));

                            // We will try to optimize when we have a single field struct that is being struct promoted
                            if (varDsc->lvFieldCnt == 1)
                            {
                                unsigned lclNumFld = varDsc->lvFieldLclStart;
                                // just grab the promoted field
                                LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);

                                // Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
                                // is zero
                                if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
                                {
                                    // We can just use the existing promoted field LclNum
                                    temp->AsLclVarCommon()->SetLclNum(lclNumFld);
                                    temp->gtType = fieldVarDsc->TypeGet();

                                    foldAndReturnTemp = true;
                                }
                            }
                        }
                        // If the type of the IND (typ) is a "small int", and the type of the local has the
                        // same width, then we can reduce to just the local variable -- it will be
                        // correctly normalized.
                        //
                        // The below transformation cannot be applied if the local var needs to be normalized on load.
                        else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
                                 !lvaTable[lclNum].lvNormalizeOnLoad())
                        {
                            const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
                            const bool possiblyStore  = !definitelyLoad;

                            if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
                            {
                                typ               = temp->TypeGet();
                                tree->gtType      = typ;
                                foldAndReturnTemp = true;

                                if (possiblyStore)
                                {
                                    // This node can be on the left-hand-side of an assignment node.
                                    // Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
                                    // is called on its parent in post-order morph.
                                    temp->gtFlags |= GTF_VAR_FOLDED_IND;
                                }
                            }
                        }
                        // For matching types we can fold
                        else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
                                 !lvaTable[lclNum].lvNormalizeOnLoad())
                        {
                            tree->gtType = typ = temp->TypeGet();
                            foldAndReturnTemp  = true;
                        }
                        else
                        {
                            // Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
                            // nullptr)
                            assert(fieldSeq == nullptr);
                            bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
                            assert(b || fieldSeq == nullptr);

                            if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
                            {
                                // Append the field sequence, change the type.
                                temp->AsLclFld()->SetFieldSeq(
                                    GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
                                temp->gtType = typ;

                                foldAndReturnTemp = true;
                            }
                        }

                        // Otherwise we will fold this into a GT_LCL_FLD below
                        //   where we check (temp != nullptr)
                    }
                    else // !temp->OperIsLocal()
                    {
                        // We don't try to fold away the GT_IND/GT_ADDR for this case
                        temp = nullptr;
                    }
                }
                else if (op1->OperGet() == GT_ADD)
                {
#ifdef TARGET_ARM
                    // Check for a misaligned floating-point indirection.
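                    // Illustrative sketch (hypothetical pointer "p"): a load such as
                    // "*(float*)((byte*)p + 2)" has an offset that is not a multiple of
                    // emitTypeSize(TYP_FLOAT), so the indirection is marked GTF_IND_UNALIGNED.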
                    if (varTypeIsFloating(typ))
                    {
                        GenTree* addOp2 = op1->AsOp()->gtGetOp2();
                        if (addOp2->IsCnsIntOrI())
                        {
                            ssize_t offset = addOp2->AsIntCon()->gtIconVal;
                            if ((offset % emitTypeSize(TYP_FLOAT)) != 0)
                            {
                                tree->gtFlags |= GTF_IND_UNALIGNED;
                            }
                        }
                    }
#endif // TARGET_ARM

                    /* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */

                    if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
                        opts.OptimizationEnabled())
                    {
                        // No overflow arithmetic with pointers
                        noway_assert(!op1->gtOverflow());

                        temp = op1->AsOp()->gtOp1->AsOp()->gtOp1;
                        if (!temp->OperIsLocal())
                        {
                            temp = nullptr;
                            break;
                        }

                        // Can not remove the GT_ADDR if it is currently a CSE candidate.
                        if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1))
                        {
                            break;
                        }

                        ival1    = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal;
                        fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq;

                        // Does the address have an associated zero-offset field sequence?
                        FieldSeqNode* addrFieldSeq = nullptr;
                        if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq))
                        {
                            fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
                        }

                        if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
                        {
                            noway_assert(!varTypeIsGC(temp->TypeGet()));
                            foldAndReturnTemp = true;
                        }
                        else
                        {
                            // The emitter can't handle large offsets
                            if (ival1 != (unsigned short)ival1)
                            {
                                break;
                            }

                            // The emitter can get confused by invalid offsets
                            if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum()))
                            {
                                break;
                            }
                        }

                        // Now we can fold this into a GT_LCL_FLD below
                        //   where we check (temp != nullptr)
                    }
                }
            }

            // At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging:
            // - We may have a load of a local where the load has a different type than the local
            // - We may have a load of a local plus an offset
            //
            // In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and
            // offset if doing so is legal. The only cases in which this transformation is illegal are if the load
            // begins before the local or if the load extends beyond the end of the local (i.e. if the load is
            // out-of-bounds w.r.t. the local).
            if ((temp != nullptr) && !foldAndReturnTemp)
            {
                assert(temp->OperIsLocal());

                const unsigned   lclNum = temp->AsLclVarCommon()->GetLclNum();
                LclVarDsc* const varDsc = lvaGetDesc(lclNum);

                const var_types tempTyp = temp->TypeGet();
                const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK);
                const unsigned varSize  = useExactSize ? varDsc->lvExactSize : genTypeSize(temp);

                // Make sure we do not enregister this lclVar.
                lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));

                // If the size of the load is greater than the size of the lclVar, we cannot fold this access into
                // a lclFld: the access represented by an lclFld node must begin at or after the start of the
                // lclVar and must not extend beyond the end of the lclVar.
                if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize))
                {
                    GenTreeLclFld* lclFld;

                    // We will turn a GT_LCL_VAR into a GT_LCL_FLD with a gtLclOffs of 'ival'
                    // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival'
                    // Then we change the type of the GT_LCL_FLD to match the original GT_IND type.
                    //
                    if (temp->OperGet() == GT_LCL_FLD)
                    {
                        lclFld = temp->AsLclFld();
                        lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1));
                        lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq));
                    }
                    else // We have a GT_LCL_VAR.
{ assert(temp->OperGet() == GT_LCL_VAR); temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField". lclFld = temp->AsLclFld(); lclFld->SetLclOffs(static_cast<unsigned>(ival1)); if (fieldSeq != nullptr) { // If it does represent a field, note that. lclFld->SetFieldSeq(fieldSeq); } } temp->gtType = tree->gtType; foldAndReturnTemp = true; } } if (foldAndReturnTemp) { assert(temp != nullptr); assert(temp->TypeGet() == typ); assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR)); // Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for // 'temp' because a GT_ADDR always marks it for its operand. temp->gtFlags &= ~GTF_DONT_CSE; temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE); if (op1->OperGet() == GT_ADD) { DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT } DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR DEBUG_DESTROY_NODE(tree); // GT_IND // If the result of the fold is a local var, we may need to perform further adjustments e.g. for // normalization. if (temp->OperIs(GT_LCL_VAR)) { #ifdef DEBUG // We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear // and the node in question must have this bit set (as it has already been morphed). temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif // DEBUG const bool forceRemorph = true; temp = fgMorphLocalVar(temp, forceRemorph); #ifdef DEBUG // We then set this flag on `temp` because `fgMorhpLocalVar` may not set it itself, and the // caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function // returns. temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif // DEBUG } return temp; } // Only do this optimization when we are in the global optimizer. Doing this after value numbering // could result in an invalid value number for the newly generated GT_IND node. if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph) { // Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)). // TBD: this transformation is currently necessary for correctness -- it might // be good to analyze the failures that result if we don't do this, and fix them // in other ways. Ideally, this should be optional. GenTree* commaNode = op1; GenTreeFlags treeFlags = tree->gtFlags; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS at // least. #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA) { commaNode = commaNode->AsOp()->gtOp2; commaNode->gtType = typ; commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is // dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at // least. commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) & (GTF_ASG | GTF_CALL)); #ifdef DEBUG commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif } bool wasArrIndex = (tree->gtFlags & GTF_IND_ARR_INDEX) != 0; ArrayInfo arrInfo; if (wasArrIndex) { bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo); assert(b); GetArrayInfoMap()->Remove(tree); } tree = op1; GenTree* addr = commaNode->AsOp()->gtOp2; // TODO-1stClassStructs: we often create a struct IND without a handle, fix it. 
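            // Illustrative sketch: IND(COMMA(s1, addr)) has been re-rooted so the COMMA is
            // now "tree"; the new IND built below becomes the last COMMA operand, yielding
            // COMMA(s1, IND(addr)).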
op1 = gtNewIndir(typ, addr); // This is very conservative op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING; op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT); if (wasArrIndex) { GetArrayInfoMap()->Set(op1, arrInfo); } #ifdef DEBUG op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif commaNode->AsOp()->gtOp2 = op1; commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT); return tree; } break; } case GT_ADDR: // Can not remove op1 if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(op1)) { break; } if (op1->OperGet() == GT_IND) { if ((op1->gtFlags & GTF_IND_ARR_INDEX) == 0) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(IND(...)) == (...). GenTree* addr = op1->AsOp()->gtOp1; // If tree has a zero field sequence annotation, update the annotation // on addr node. FieldSeqNode* zeroFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq)) { fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq); } noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } } else if (op1->OperGet() == GT_OBJ) { // Can not remove a GT_ADDR if it is currently a CSE candidate. if (gtIsActiveCSE_Candidate(tree)) { break; } // Perform the transform ADDR(OBJ(...)) == (...). GenTree* addr = op1->AsObj()->Addr(); noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL); DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(tree); return addr; } else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase) { // Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)). // (Be sure to mark "z" as an l-value...) ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack)); for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2()) { commas.Push(comma); } GenTree* commaNode = commas.Top(); // The top-level addr might be annotated with a zeroOffset field. FieldSeqNode* zeroFieldSeq = nullptr; bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq); tree = op1; commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE; // If the node we're about to put under a GT_ADDR is an indirection, it // doesn't need to be materialized, since we only want the addressing mode. Because // of this, this GT_IND is not a faulting indirection and we don't have to extract it // as a side effect. GenTree* commaOp2 = commaNode->AsOp()->gtOp2; if (commaOp2->OperIsBlk()) { commaOp2->SetOper(GT_IND); } if (commaOp2->gtOper == GT_IND) { commaOp2->gtFlags |= GTF_IND_NONFAULTING; commaOp2->gtFlags &= ~GTF_EXCEPT; commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT); } op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2); if (isZeroOffset) { // Transfer the annotation to the new GT_ADDR node. fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq); } commaNode->AsOp()->gtOp2 = op1; // Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform // might give op1 a type different from byref (like, say, native int). So now go back and give // all the comma nodes the type of op1. // TODO: the comma flag update below is conservative and can be improved. // For example, if we made the ADDR(IND(x)) == x transformation, we may be able to // get rid of some of the IND flags on the COMMA nodes (e.g., GTF_GLOB_REF). 
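                // Illustrative sketch: ADDR(COMMA(s1, IND(x))) has become
                // COMMA(s1, ADDR(IND(x))); the loop below retypes each COMMA to match the
                // new "op1" and refreshes its side-effect flags via gtUpdateNodeSideEffects.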
while (!commas.Empty()) { GenTree* comma = commas.Pop(); comma->gtType = op1->gtType; comma->gtFlags |= op1->gtFlags; #ifdef DEBUG comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED; #endif gtUpdateNodeSideEffects(comma); } return tree; } break; case GT_COLON: if (fgGlobalMorph) { /* Mark the nodes that are conditionally executed */ fgWalkTreePre(&tree, gtMarkColonCond); } /* Since we're doing this postorder we clear this if it got set by a child */ fgRemoveRestOfBlock = false; break; case GT_COMMA: /* Special case: trees that don't produce a value */ if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2)) { typ = tree->gtType = TYP_VOID; } // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. // if (!optValnumCSE_phase) { // Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this // is all we need. GenTree* op1SideEffects = nullptr; // The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example) // hoisted expressions in loops. gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE)); if (op1SideEffects) { // Replace the left hand side with the side effect list. op1 = op1SideEffects; tree->AsOp()->gtOp1 = op1SideEffects; gtUpdateNodeSideEffects(tree); } else { op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op1); return op2; } // If the right operand is just a void nop node, throw it away. Unless this is a // comma throw, in which case we want the top-level morphing loop to recognize it. if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree)) { op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG)); DEBUG_DESTROY_NODE(tree); DEBUG_DESTROY_NODE(op2); return op1; } } break; case GT_JTRUE: /* Special case if fgRemoveRestOfBlock is set to true */ if (fgRemoveRestOfBlock) { if (fgIsCommaThrow(op1, true)) { GenTree* throwNode = op1->AsOp()->gtOp1; JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n", dspTreeID(tree)); DEBUG_DESTROY_NODE(tree); return throwNode; } noway_assert(op1->OperIsCompare()); noway_assert(op1->gtFlags & GTF_EXCEPT); // We need to keep op1 for the side-effects. Hang it off // a GT_COMMA node JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree)); tree->ChangeOper(GT_COMMA); tree->AsOp()->gtOp2 = op2 = gtNewNothingNode(); // Additionally since we're eliminating the JTRUE // codegen won't like it if op1 is a RELOP of longs, floats or doubles. // So we change it into a GT_COMMA as well. 
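                    // Illustrative sketch: JTRUE(LT(a, b)) in a block that now always throws
                    // becomes COMMA(COMMA(a, b), NOP), preserving any side effects of "a"
                    // and "b" while discarding the branch itself.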
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1)); op1->ChangeOper(GT_COMMA); op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop op1->gtType = op1->AsOp()->gtOp1->gtType; return tree; } break; case GT_INTRINSIC: if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant) { // Should be expanded by the time it reaches CSE phase assert(!optValnumCSE_phase); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to "); if (op1->OperIsConst()) { // We're lucky to catch a constant here while importer was not JITDUMP("true\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(1); } else { GenTree* op1SideEffects = nullptr; gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT); if (op1SideEffects != nullptr) { DEBUG_DESTROY_NODE(tree); // Keep side-effects of op1 tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0)); JITDUMP("false with side effects:\n") DISPTREE(tree); } else { JITDUMP("false\n"); DEBUG_DESTROY_NODE(tree, op1); tree = gtNewIconNode(0); } } INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return tree; } break; default: break; } assert(oper == tree->gtOper); // Propagate comma throws. // If we are in the Valuenum CSE phase then don't morph away anything as these // nodes may have CSE defs/uses in them. if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON)) { if ((op1 != nullptr) && fgIsCommaThrow(op1, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY); if (propagatedThrow != nullptr) { return propagatedThrow; } } if ((op2 != nullptr) && fgIsCommaThrow(op2, true)) { GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT); if (propagatedThrow != nullptr) { return propagatedThrow; } } } /*------------------------------------------------------------------------- * Optional morphing is done if tree transformations is permitted */ if ((opts.compFlags & CLFLG_TREETRANS) == 0) { return tree; } tree = fgMorphSmpOpOptional(tree->AsOp()); return tree; } //------------------------------------------------------------------------ // fgOptimizeCast: Optimizes the supplied GT_CAST tree. // // Tries to get rid of the cast, its operand, the GTF_OVERFLOW flag, calls // calls "optNarrowTree". Called in post-order by "fgMorphSmpOp". // // Arguments: // tree - the cast tree to optimize // // Return Value: // The optimized tree (that can have any shape). // GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast) { GenTree* src = cast->CastOp(); if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src)) { return cast; } // See if we can discard the cast. if (varTypeIsIntegral(cast) && varTypeIsIntegral(src)) { IntegralRange srcRange = IntegralRange::ForNode(src, this); IntegralRange noOvfRange = IntegralRange::ForCastInput(cast); if (noOvfRange.Contains(srcRange)) { // Casting between same-sized types is a no-op, // given we have proven this cast cannot overflow. if (genActualType(cast) == genActualType(src)) { return src; } cast->ClearOverflow(); cast->SetAllEffectsFlags(src); // Try and see if we can make this cast into a cheaper zero-extending version. if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive()) { cast->SetUnsigned(); } } // For checked casts, we're done. if (cast->gtOverflow()) { return cast; } var_types castToType = cast->CastToType(); // For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast. 
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) && src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD)) { // We're changing the type here so we need to update the VN; // in other cases we discard the cast without modifying src // so the VN doesn't change. src->ChangeType(castToType); src->SetVNsFromNode(cast); return src; } // Try to narrow the operand of the cast and discard the cast. if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) && optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false)) { optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true); // "optNarrowTree" may leave a dead cast behind. if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp()))) { src = src->AsCast()->CastOp(); } return src; } // Check for two consecutive casts, we may be able to discard the intermediate one. if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow()) { var_types dstCastToType = castToType; var_types srcCastToType = src->AsCast()->CastToType(); // CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X). // CAST(ushort <- CAST(short <- X)): CAST(ushort <- X). if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType))) { cast->CastOp() = src->AsCast()->CastOp(); DEBUG_DESTROY_NODE(src); } } } return cast; } //------------------------------------------------------------------------ // fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns. // // Arguments: // cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant // // Return Value: // The optimized tree, "cmp" in case no optimizations were done. // Currently only returns relop trees. // GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_EQ, GT_NE)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); // Check for "(expr +/- icon1) ==/!= (non-zero-icon2)". if (op2->IsCnsIntOrI() && (op2->IconValue() != 0)) { // Since this can occur repeatedly we use a while loop. while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Got it; change "x + icon1 == icon2" to "x == icon2 - icon1". ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue(); ssize_t op2Value = op2->IconValue(); if (op1->OperIs(GT_ADD)) { op2Value -= op1Value; } else { op2Value += op1Value; } op1 = op1->AsOp()->gtGetOp1(); op2->SetIconValue(static_cast<int32_t>(op2Value)); } cmp->gtOp1 = op1; fgUpdateConstTreeValueNumber(op2); } // Here we look for the following tree // // EQ/NE // / \. // op1 CNS 0/1 // if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1)) { ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue()); if (op1->OperIsCompare()) { // Here we look for the following tree // // EQ/NE -> RELOP/!RELOP // / \ / \. // RELOP CNS 0/1 // / \. // // Note that we will remove/destroy the EQ/NE node and move // the RELOP up into it's location. // Here we reverse the RELOP if necessary. 
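        // Illustrative sketch: EQ(GT(x, y), 0) becomes LE(x, y) (reversed relop), while
        // NE(GT(x, y), 0) becomes GT(x, y) unchanged; either way the outer compare node
        // is destroyed below.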
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ))); if (reverse) { gtReverseCond(op1); } noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0); op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE); op1->SetVNsFromNode(cmp); DEBUG_DESTROY_NODE(cmp); return op1; } // // Now we check for a compare with the result of an '&' operator // // Here we look for the following transformation: // // EQ/NE EQ/NE // / \ / \. // AND CNS 0/1 -> AND CNS 0 // / \ / \. // RSZ/RSH CNS 1 x CNS (1 << y) // / \. // x CNS_INT +y if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH)) { GenTreeOp* andOp = op1->AsOp(); GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp(); if (!rshiftOp->gtGetOp2()->IsCnsIntOrI()) { goto SKIP; } ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue(); if (shiftAmount < 0) { goto SKIP; } if (!andOp->gtGetOp2()->IsIntegralConst(1)) { goto SKIP; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if (andOp->TypeIs(TYP_INT)) { if (shiftAmount > 31) { goto SKIP; } andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount)); // Reverse the condition if necessary. if (op2Value == 1) { gtReverseCond(cmp); op2->SetIconValue(0); } } else if (andOp->TypeIs(TYP_LONG)) { if (shiftAmount > 63) { goto SKIP; } andMask->SetLngValue(1ll << shiftAmount); // Reverse the cond if necessary if (op2Value == 1) { gtReverseCond(cmp); op2->SetLngValue(0); } } andOp->gtOp1 = rshiftOp->gtGetOp1(); DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2()); DEBUG_DESTROY_NODE(rshiftOp); } } SKIP: // Now check for compares with small constant longs that can be cast to int. // Note that we filter out negative values here so that the transformations // below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were // we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs. if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0)) { return cmp; } if (!op1->OperIs(GT_AND)) { // Another interesting case: cast from int. if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow()) { // Simply make this into an integer comparison. cmp->gtOp1 = op1->AsCast()->CastOp(); op2->BashToConst(static_cast<int32_t>(op2->LngValue())); fgUpdateConstTreeValueNumber(op2); } return cmp; } // Now we perform the following optimization: // EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) => // EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT) // when the constants are sufficiently small. // This transform cannot preserve VNs. if (fgGlobalMorph) { assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND)); // Is the result of the mask effectively an INT? GenTreeOp* andOp = op1->AsOp(); if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG)) { return cmp; } GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon(); if ((andMask->LngValue() >> 32) != 0) { return cmp; } // Now we narrow the first operand of AND to int. if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false)) { optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true); } else { andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT); } assert(andMask == andOp->gtGetOp2()); // Now replace the mask node. andMask->BashToConst(static_cast<int32_t>(andMask->LngValue())); // Now change the type of the AND node. andOp->ChangeType(TYP_INT); // Finally we replace the comparand. 
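        // Illustrative sketch: EQ(AND(long x, 0xFF), 0x12) becomes
        // EQ(AND(CAST(int <- x), 0xFF), 0x12) with an INT-typed AND and mask; the
        // comparand itself is retyped below.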
op2->BashToConst(static_cast<int32_t>(op2->LngValue())); } return cmp; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation. // // Recognizes comparisons against various constant operands and morphs // them, if possible, into comparisons against zero. // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // The "cmp" tree, possibly with a modified oper. // The second operand's constant value may be modified as well. // // Assumptions: // The operands have been swapped so that any constants are on the right. // The second operand is an integral constant. // GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(cmp->gtGetOp2()->IsIntegralConst()); assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2())); GenTree* op1 = cmp->gtGetOp1(); GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon(); assert(genActualType(op1) == genActualType(op2)); genTreeOps oper = cmp->OperGet(); int64_t op2Value = op2->IntegralValue(); if (op2Value == 1) { // Check for "expr >= 1". if (oper == GT_GE) { // Change to "expr != 0" for unsigned and "expr > 0" for signed. oper = cmp->IsUnsigned() ? GT_NE : GT_GT; } // Check for "expr < 1". else if (oper == GT_LT) { // Change to "expr == 0" for unsigned and "expr <= 0". oper = cmp->IsUnsigned() ? GT_EQ : GT_LE; } } // Check for "expr relop -1". else if (!cmp->IsUnsigned() && (op2Value == -1)) { // Check for "expr <= -1". if (oper == GT_LE) { // Change to "expr < 0". oper = GT_LT; } // Check for "expr > -1". else if (oper == GT_GT) { // Change to "expr >= 0". oper = GT_GE; } } else if (cmp->IsUnsigned()) { if ((oper == GT_LE) || (oper == GT_GT)) { if (op2Value == 0) { // IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT // recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails // if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0) // and (x GT_LE.unsigned 0) into (x GT_EQ 0). The later case is rare, it sometimes // occurs as a result of branch inversion. oper = (oper == GT_LE) ? GT_EQ : GT_NE; cmp->gtFlags &= ~GTF_UNSIGNED; } // LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0). else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) || ((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX))) { oper = (oper == GT_LE) ? GT_GE : GT_LT; cmp->gtFlags &= ~GTF_UNSIGNED; } } } if (!cmp->OperIs(oper)) { // Keep the old ValueNumber for 'tree' as the new expr // will still compute the same value as before. cmp->SetOper(oper, GenTree::PRESERVE_VN); op2->SetIntegralValue(0); fgUpdateConstTreeValueNumber(op2); } return cmp; } #ifdef FEATURE_HW_INTRINSICS //------------------------------------------------------------------------ // fgOptimizeHWIntrinsic: optimize a HW intrinsic node // // Arguments: // node - HWIntrinsic node to examine // // Returns: // The original node if no optimization happened or if tree bashing occured. // An alternative tree if an optimization happened. // // Notes: // Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create, // and if the call is one of these, attempt to optimize. // This is post-order, meaning that it will not morph the children. 
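// For example (illustrative): Vector128.Create(0, 0, 0, 0) whose operands have all folded
// to constant zero is rewritten to the Vector128_get_Zero intrinsic, which codegen can
// typically emit as a single register-zeroing instruction.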
// GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node) { assert(!optValnumCSE_phase); if (opts.OptimizationDisabled()) { return node; } switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARM64) case NI_Vector64_Create: #endif { bool hwAllArgsAreConstZero = true; for (GenTree* arg : node->Operands()) { if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero()) { hwAllArgsAreConstZero = false; break; } } if (hwAllArgsAreConstZero) { switch (node->GetHWIntrinsicId()) { case NI_Vector128_Create: { node->ResetHWIntrinsicId(NI_Vector128_get_Zero); break; } #if defined(TARGET_XARCH) case NI_Vector256_Create: { node->ResetHWIntrinsicId(NI_Vector256_get_Zero); break; } #elif defined(TARGET_ARM64) case NI_Vector64_Create: { node->ResetHWIntrinsicId(NI_Vector64_get_Zero); break; } #endif default: unreached(); } } break; } default: break; } return node; } #endif //------------------------------------------------------------------------ // fgOptimizeCommutativeArithmetic: Optimizes commutative operations. // // Arguments: // tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize. // // Return Value: // The optimized tree that can have any shape. // GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree) { assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND)); assert(!tree->gtOverflowEx()); // Commute constants to the right. if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF)) { // TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))". // This may indicate a missed "remorph". Task is to re-enable this assertion and investigate. std::swap(tree->gtOp1, tree->gtOp2); } if (fgOperIsBitwiseRotationRoot(tree->OperGet())) { GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree); if (rotationTree != nullptr) { return rotationTree; } } if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR)) { GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp()); if (castTree != nullptr) { return castTree; } } if (varTypeIsIntegralOrI(tree)) { genTreeOps oldTreeOper = tree->OperGet(); GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp()); if (optimizedTree != nullptr) { if (!optimizedTree->OperIs(oldTreeOper)) { // "optimizedTree" could end up being a COMMA. return optimizedTree; } tree = optimizedTree; } } if (!optValnumCSE_phase) { GenTree* optimizedTree = nullptr; if (tree->OperIs(GT_ADD)) { optimizedTree = fgOptimizeAddition(tree); } else if (tree->OperIs(GT_MUL)) { optimizedTree = fgOptimizeMultiply(tree); } else if (tree->OperIs(GT_AND)) { optimizedTree = fgOptimizeBitwiseAnd(tree); } if (optimizedTree != nullptr) { return optimizedTree; } } return tree; } //------------------------------------------------------------------------ // fgOptimizeAddition: optimizes addition. // // Arguments: // add - the unchecked GT_ADD tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add) { assert(add->OperIs(GT_ADD) && !add->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = add->gtGetOp1(); GenTree* op2 = add->gtGetOp2(); // Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))". // Be careful not to create a byref pointer that may point outside of the ref object. 
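    // Illustrative sketch: ADD(ADD(x, 4), ADD(y, 8)) => ADD(ADD(x, y), 12), exposing a
    // single constant that later phases (e.g. address mode formation) can consume.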
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2". if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() && !varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph) { GenTreeOp* addOne = op1->AsOp(); GenTreeOp* addTwo = op2->AsOp(); GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon(); GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon(); addOne->gtOp2 = addTwo->gtGetOp1(); addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2()); DEBUG_DESTROY_NODE(addTwo); constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue()); op2 = constOne; add->gtOp2 = constOne; DEBUG_DESTROY_NODE(constTwo); } // Fold (x + 0) - given it won't change the tree type to TYP_REF. // TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)". if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF))) { if (op2->IsCnsIntOrI() && varTypeIsI(op1)) { fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq); } DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(add); return op1; } // Note that these transformations are legal for floating-point ADDs as well. if (opts.OptimizationEnabled()) { // - a + b = > b - a // ADD((NEG(a), b) => SUB(b, a) // Do not do this if "op2" is constant for canonicalization purposes. if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2)) { add->SetOper(GT_SUB); add->gtOp1 = op2; add->gtOp2 = op1->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op1); return add; } // a + -b = > a - b // ADD(a, (NEG(b)) => SUB(a, b) if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG)) { add->SetOper(GT_SUB); add->gtOp2 = op2->AsOp()->gtGetOp1(); DEBUG_DESTROY_NODE(op2); return add; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeMultiply: optimizes multiplication. // // Arguments: // mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize. // // Return Value: // The optimized tree, that can have any shape, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul) { assert(mul->OperIs(GT_MUL)); assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul)); assert(!mul->gtOverflow()); assert(!optValnumCSE_phase); GenTree* op1 = mul->gtGetOp1(); GenTree* op2 = mul->gtGetOp2(); assert(mul->TypeGet() == genActualType(op1)); assert(mul->TypeGet() == genActualType(op2)); if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl()) { double multiplierValue = op2->AsDblCon()->gtDconVal; if (multiplierValue == 1.0) { // Fold "x * 1.0" to "x". DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Fold "x * 2.0" to "x + x". // If op1 is not a local we will have to introduce a temporary via GT_COMMA. // Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do // this for locals / after hoisting has run (when rationalization remorphs // math INTRINSICSs into calls...). 
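        // Illustrative sketch (hypothetical local "d"): "d * 2.0" becomes ADD(d, d) via
        // fgMakeMultiUse, which is often cheaper than a floating-point multiply.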
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear))) { op2 = fgMakeMultiUse(&op1); GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2); INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED); return add; } } if (op2->IsIntegralConst()) { ssize_t mult = op2->AsIntConCommon()->IconValue(); bool op2IsConstIndex = op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq(); assert(!op2IsConstIndex || op2->AsIntCon()->gtFieldSeq->m_next == nullptr); if (mult == 0) { // We may be able to throw away op1 (unless it has side-effects) if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0) { DEBUG_DESTROY_NODE(op1); DEBUG_DESTROY_NODE(mul); return op2; // Just return the "0" node } // We need to keep op1 for the side-effects. Hang it off a GT_COMMA node. mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN); return mul; } #ifdef TARGET_XARCH // Should we try to replace integer multiplication with lea/add/shift sequences? bool mulShiftOpt = compCodeOpt() != SMALL_CODE; #else // !TARGET_XARCH bool mulShiftOpt = false; #endif // !TARGET_XARCH size_t abs_mult = (mult >= 0) ? mult : -mult; size_t lowestBit = genFindLowestBit(abs_mult); bool changeToShift = false; // is it a power of two? (positive or negative) if (abs_mult == lowestBit) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } // If "op2" is a constant array index, the other multiplicand must be a constant. // Transfer the annotation to the other one. if (op2->OperGet() == GT_CNS_INT && op2->AsIntCon()->gtFieldSeq != nullptr && op2->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(op2->AsIntCon()->gtFieldSeq->m_next == nullptr); GenTree* otherOp = op1; if (otherOp->OperGet() == GT_NEG) { otherOp = otherOp->AsOp()->gtOp1; } assert(otherOp->OperGet() == GT_CNS_INT); assert(otherOp->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField()); otherOp->AsIntCon()->gtFieldSeq = op2->AsIntCon()->gtFieldSeq; } if (abs_mult == 1) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(mul); return op1; } // Change the multiplication into a shift by log2(val) bits. op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult)); changeToShift = true; } else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit)) { int shift = genLog2(lowestBit); ssize_t factor = abs_mult >> shift; if (factor == 3 || factor == 5 || factor == 9) { // if negative negate (min-int does not need negation) if (mult < 0 && mult != SSIZE_T_MIN) { op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1); mul->gtOp1 = op1; fgMorphTreeDone(op1); } GenTree* factorIcon = gtNewIconNode(factor, mul->TypeGet()); if (op2IsConstIndex) { factorIcon->AsIntCon()->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::ConstantIndexPseudoField); } // change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, factorIcon); mul->gtOp1 = op1; fgMorphTreeDone(op1); op2->AsIntConCommon()->SetIconValue(shift); changeToShift = true; } } if (changeToShift) { fgUpdateConstTreeValueNumber(op2); mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN); return mul; } } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeBitwiseAnd: optimizes the "and" operation. // // Arguments: // andOp - the GT_AND tree to optimize. 
// // Return Value: // The optimized tree, currently always a relop, in case any transformations // were performed. Otherwise, "nullptr", guaranteeing no state change. // GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp) { assert(andOp->OperIs(GT_AND)); assert(!optValnumCSE_phase); GenTree* op1 = andOp->gtGetOp1(); GenTree* op2 = andOp->gtGetOp2(); // Fold "cmp & 1" to just "cmp". if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(andOp); return op1; } return nullptr; } //------------------------------------------------------------------------ // fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against // various cast operands and tries to remove them. E.g.: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CNS_INT long // // to: // // * GE_un int // +--* X int // \--* CNS_INT int // // same for: // // * GE int // +--* CAST long <- ulong <- uint // | \--* X int // \--* CAST long <- [u]long <- int // \--* ARR_LEN int // // These patterns quite often show up along with index checks // // Arguments: // cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph. // // Return Value: // Returns the same tree where operands might have narrower types // // Notes: // TODO-Casts: consider unifying this function with "optNarrowTree" // GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp) { assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT)); assert(!optValnumCSE_phase); GenTree* op1 = cmp->gtGetOp1(); GenTree* op2 = cmp->gtGetOp2(); // Caller is expected to call this function only if we have CAST nodes assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)); if (!op1->TypeIs(TYP_LONG)) { // We can extend this logic to handle small types as well, but currently it's done mostly to // assist range check elimination return cmp; } GenTree* castOp; GenTree* knownPositiveOp; bool knownPositiveIsOp2; if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)))) { // op2 is either a LONG constant or (T)ARR_LENGTH knownPositiveIsOp2 = true; castOp = cmp->gtGetOp1(); knownPositiveOp = cmp->gtGetOp2(); } else { // op1 is either a LONG constant (yes, it's pretty normal for relops) // or (T)ARR_LENGTH castOp = cmp->gtGetOp2(); knownPositiveOp = cmp->gtGetOp1(); knownPositiveIsOp2 = false; } if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) && castOp->IsUnsigned() && !castOp->gtOverflow()) { bool knownPositiveFitsIntoU32 = false; if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue())) { // BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX. knownPositiveFitsIntoU32 = true; } else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) && knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH)) { knownPositiveFitsIntoU32 = true; // TODO-Casts: recognize Span.Length here as well. 
} if (!knownPositiveFitsIntoU32) { return cmp; } JITDUMP("Removing redundant cast(s) for:\n") DISPTREE(cmp) JITDUMP("\n\nto:\n\n") cmp->SetUnsigned(); // Drop cast from castOp if (knownPositiveIsOp2) { cmp->gtOp1 = castOp->AsCast()->CastOp(); } else { cmp->gtOp2 = castOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(castOp); if (knownPositiveOp->OperIs(GT_CAST)) { // Drop cast from knownPositiveOp too if (knownPositiveIsOp2) { cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp(); } else { cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp(); } DEBUG_DESTROY_NODE(knownPositiveOp); } else { // Change type for constant from LONG to INT knownPositiveOp->ChangeType(TYP_INT); #ifndef TARGET_64BIT assert(knownPositiveOp->OperIs(GT_CNS_LNG)); knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue())); #endif fgUpdateConstTreeValueNumber(knownPositiveOp); } DISPTREE(cmp) JITDUMP("\n") } return cmp; } //------------------------------------------------------------------------ // fgPropagateCommaThrow: propagate a "comma throw" up the tree. // // "Comma throws" in the compiler represent the canonical form of an always // throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy // the semantic that the original expression produced some value and are // generated by "gtFoldExprConst" when it encounters checked arithmetic that // will determinably overflow. // // In the global morphing phase, "comma throws" are "propagated" up the tree, // in post-order, to eliminate nodes that will never execute. This method, // called by "fgMorphSmpOp", encapsulates this optimization. // // Arguments: // parent - the node currently being processed. // commaThrow - the comma throw in question, "parent"'s operand. // precedingSideEffects - side effects of nodes preceding "comma" in execution order. // // Return Value: // If "parent" is to be replaced with a comma throw, i. e. the propagation was successful, // the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception: // the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not // have to be a "comma throw", it can be "bare" throw call if the "parent" node did not // produce any value. // // Notes: // "Comma throws" are very rare. // GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects) { // Comma throw propagation does not preserve VNs, and deletes nodes. assert(fgGlobalMorph); assert(fgIsCommaThrow(commaThrow)); if ((commaThrow->gtFlags & GTF_COLON_COND) == 0) { fgRemoveRestOfBlock = true; } if ((precedingSideEffects & GTF_ALL_EFFECT) == 0) { if (parent->TypeIs(TYP_VOID)) { // Return the throw node as the new tree. return commaThrow->gtGetOp1(); } // Fix up the COMMA's type if needed. if (genActualType(parent) != genActualType(commaThrow)) { commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent)); commaThrow->ChangeType(genActualType(parent)); } return commaThrow; } return nullptr; } //---------------------------------------------------------------------------------------------- // fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree. // // Arguments: // node - The return node that uses an indirection. // // Return Value: // the original op1 of the ret if there was no optimization or an optimized new op1. 
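// For example (illustrative, from the Vector128.AsVector2 shape noted in the body):
// RETURN SIMD8(OBJ SIMD8(ADDR(LCL_VAR SIMD16))) can drop the OBJ/ADDR pair and return
// the retyped local directly, keeping it enregisterable.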
// GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret) { assert(ret->OperIs(GT_RETURN)); assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ)); GenTreeIndir* ind = ret->gtGetOp1()->AsIndir(); GenTree* addr = ind->Addr(); if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR)) { // If struct promotion was undone, adjust the annotations if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr)) { return ind; } // If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that // LclVar. // Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))). GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar(); if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum())) { assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind)); unsigned indSize; if (ind->OperIs(GT_IND)) { indSize = genTypeSize(ind); } else { indSize = ind->AsBlk()->GetLayout()->GetSize(); } LclVarDsc* varDsc = lvaGetDesc(lclVar); unsigned lclVarSize; if (!lclVar->TypeIs(TYP_STRUCT)) { lclVarSize = genTypeSize(varDsc->TypeGet()); } else { lclVarSize = varDsc->lvExactSize; } // TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST // int<-SIMD16` etc. assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister); #if defined(TARGET_64BIT) bool canFold = (indSize == lclVarSize); #else // !TARGET_64BIT // TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST // long<->double` there. bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES); #endif // TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for // gtNewTempAssign`. if (canFold && (genReturnBB == nullptr)) { // Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it. // Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken // and enregister it. DEBUG_DESTROY_NODE(ind); DEBUG_DESTROY_NODE(addr); ret->gtOp1 = lclVar; // We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can // get rid of it now since the GT_RETURN node should never have // its address taken. assert((ret->gtFlags & GTF_DONT_CSE) == 0); lclVar->gtFlags &= ~GTF_DONT_CSE; return lclVar; } else if (!varDsc->lvDoNotEnregister) { lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet)); } } } return ind; } #ifdef _PREFAST_ #pragma warning(pop) #endif GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree) { genTreeOps oper = tree->gtOper; GenTree* op1 = tree->gtOp1; GenTree* op2 = tree->gtOp2; var_types typ = tree->TypeGet(); if (fgGlobalMorph && GenTree::OperIsCommutative(oper)) { /* Swap the operands so that the more expensive one is 'op1' */ if (tree->gtFlags & GTF_REVERSE_OPS) { tree->gtOp1 = op2; tree->gtOp2 = op1; op2 = op1; op1 = tree->gtOp1; tree->gtFlags &= ~GTF_REVERSE_OPS; } if (oper == op2->gtOper) { /* Reorder nested operators at the same precedence level to be left-recursive. For example, change "(a+(b+c))" to the equivalent expression "((a+b)+c)". 
*/ /* Things are handled differently for floating-point operators */ if (!varTypeIsFloating(tree->TypeGet())) { fgMoveOpsLeft(tree); op1 = tree->gtOp1; op2 = tree->gtOp2; } } } #if REARRANGE_ADDS /* Change "((x+icon)+y)" to "((x+y)+icon)" Don't reorder floating-point operations */ if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() && varTypeIsIntegralOrI(typ)) { GenTree* ad1 = op1->AsOp()->gtOp1; GenTree* ad2 = op1->AsOp()->gtOp2; if (!op2->OperIsConst() && ad2->OperIsConst()) { // This takes // + (tree) // / \. // / \. // / \. // + (op1) op2 // / \. // / \. // ad1 ad2 // // and it swaps ad2 and op2. // Don't create a byref pointer that may point outside of the ref object. // If a GC happens, the byref won't get updated. This can happen if one // of the int components is negative. It also requires the address generation // be in a fully-interruptible code region. if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet())) { tree->gtOp2 = ad2; op1->AsOp()->gtOp2 = op2; op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT; op2 = tree->gtOp2; } } } #endif /*------------------------------------------------------------------------- * Perform optional oper-specific postorder morphing */ switch (oper) { case GT_ASG: // Make sure we're allowed to do this. if (optValnumCSE_phase) { // It is not safe to reorder/delete CSE's break; } if (varTypeIsStruct(typ) && !tree->IsPhiDefn()) { if (tree->OperIsCopyBlkOp()) { return fgMorphCopyBlock(tree); } else { return fgMorphInitBlock(tree); } } if (typ == TYP_LONG) { break; } if (op2->gtFlags & GTF_ASG) { break; } if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT)) { break; } /* Special case: a cast that can be thrown away */ // TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only // one cast and sometimes there is another one after it that gets removed by this // code. fgMorphSmp should be improved to remove all redundant casts so this code // can be removed. 
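            // Illustrative sketch (hypothetical store): "*(short*)p = (short)intVal" is
            // ASG(IND(short), CAST(short <- intVal)); the cast is discarded because the
            // short-typed store already truncates to 16 bits.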
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow()) { var_types srct; var_types cast; var_types dstt; srct = op2->AsCast()->CastOp()->TypeGet(); cast = (var_types)op2->CastToType(); dstt = op1->TypeGet(); /* Make sure these are all ints and precision is not lost */ if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT) { op2 = tree->gtOp2 = op2->AsCast()->CastOp(); } } break; case GT_MUL: /* Check for the case "(val + icon) * icon" */ if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD) { GenTree* add = op1->AsOp()->gtOp2; if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0)) { if (tree->gtOverflow() || op1->gtOverflow()) { break; } ssize_t imul = op2->AsIntCon()->gtIconVal; ssize_t iadd = add->AsIntCon()->gtIconVal; /* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */ oper = GT_ADD; tree->ChangeOper(oper); op2->AsIntCon()->SetValueTruncating(iadd * imul); op1->ChangeOper(GT_MUL); add->AsIntCon()->SetIconValue(imul); } } break; case GT_DIV: /* For "val / 1", just return "val" */ if (op2->IsIntegralConst(1)) { DEBUG_DESTROY_NODE(tree); return op1; } break; case GT_UDIV: case GT_UMOD: tree->CheckDivideByConstOptimized(this); break; case GT_LSH: /* Check for the case "(val + icon) << icon" */ if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow()) { GenTree* cns = op1->AsOp()->gtOp2; if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0)) { ssize_t ishf = op2->AsIntConCommon()->IconValue(); ssize_t iadd = cns->AsIntConCommon()->IconValue(); // printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n"); /* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */ tree->ChangeOper(GT_ADD); // we are reusing the shift amount node here, but the type we want is that of the shift result op2->gtType = op1->gtType; op2->AsIntConCommon()->SetValueTruncating(iadd << ishf); if (cns->gtOper == GT_CNS_INT && cns->AsIntCon()->gtFieldSeq != nullptr && cns->AsIntCon()->gtFieldSeq->IsConstantIndexFieldSeq()) { assert(cns->AsIntCon()->gtFieldSeq->m_next == nullptr); op2->AsIntCon()->gtFieldSeq = cns->AsIntCon()->gtFieldSeq; } op1->ChangeOper(GT_LSH); cns->AsIntConCommon()->SetIconValue(ishf); } } break; case GT_XOR: if (!optValnumCSE_phase) { /* "x ^ -1" is "~x" */ if (op2->IsIntegralConst(-1)) { tree->ChangeOper(GT_NOT); tree->gtOp2 = nullptr; DEBUG_DESTROY_NODE(op2); } else if (op2->IsIntegralConst(1) && op1->OperIsCompare()) { /* "binaryVal ^ 1" is "!binaryVal" */ gtReverseCond(op1); DEBUG_DESTROY_NODE(op2); DEBUG_DESTROY_NODE(tree); return op1; } } break; case GT_INIT_VAL: // Initialization values for initBlk have special semantics - their lower // byte is used to fill the struct. However, we allow 0 as a "bare" value, // which enables them to get a VNForZero, and be propagated. if (op1->IsIntegralConst(0)) { return op1; } break; default: break; } return tree; } #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree. // // Arguments: // multiOp - The tree to morph // // Return Value: // The fully morphed tree. 
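// For example (illustrative, xarch): XOR(v, Vector128<int>.Zero) produced via Sse2.Xor
// folds to just "v" once both operands have been morphed (see the XOR handling below and
// fgOptimizeHWIntrinsic).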
// GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp) { gtUpdateNodeOperSideEffects(multiOp); bool dontCseConstArguments = false; #if defined(FEATURE_HW_INTRINSICS) // Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments if (multiOp->OperIs(GT_HWINTRINSIC)) { NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId(); #if defined(TARGET_XARCH) if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM) { dontCseConstArguments = true; } #elif defined(TARGET_ARMARCH) if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic)) { dontCseConstArguments = true; } #endif } #endif for (GenTree** use : multiOp->UseEdges()) { *use = fgMorphTree(*use); GenTree* operand = *use; multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT); if (dontCseConstArguments && operand->OperIsConst()) { operand->SetDoNotCSE(); } // Promoted structs after morph must be in one of two states: // a) Fully eliminated from the IR (independent promotion) OR only be // used by "special" nodes (e. g. LHS of ASGs for multi-reg structs). // b) Marked as do-not-enregister (dependent promotion). // // So here we preserve this invariant and mark any promoted structs as do-not-enreg. // if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted) { lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum() DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep)); } } #if defined(FEATURE_HW_INTRINSICS) if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC)) { GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic(); switch (hw->GetHWIntrinsicId()) { #if defined(TARGET_XARCH) case NI_SSE_Xor: case NI_SSE2_Xor: case NI_AVX_Xor: case NI_AVX2_Xor: { // Transform XOR(X, 0) to X for vectors GenTree* op1 = hw->Op(1); GenTree* op2 = hw->Op(2); if (!gtIsActiveCSE_Candidate(hw)) { if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op1); return op2; } if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2)) { DEBUG_DESTROY_NODE(hw); DEBUG_DESTROY_NODE(op2); return op1; } } break; } #endif case NI_Vector128_Create: #if defined(TARGET_XARCH) case NI_Vector256_Create: #elif defined(TARGET_ARMARCH) case NI_Vector64_Create: #endif { bool hwAllArgsAreConst = true; for (GenTree** use : multiOp->UseEdges()) { if (!(*use)->OperIsConst()) { hwAllArgsAreConst = false; break; } } // Avoid unexpected CSE for constant arguments for Vector_.Create // but only if all arguments are constants. if (hwAllArgsAreConst) { for (GenTree** use : multiOp->UseEdges()) { (*use)->SetDoNotCSE(); } } } break; default: break; } } #endif // defined(FEATURE_HW_INTRINSICS) && defined(TARGET_XARCH) #ifdef FEATURE_HW_INTRINSICS if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase) { return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic()); } #endif return multiOp; } #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) //------------------------------------------------------------------------ // fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b // (see ECMA III 3.55 and III.3.56). // // Arguments: // tree - The GT_MOD/GT_UMOD tree to morph // // Returns: // The morphed tree // // Notes: // For ARM64 we don't have a remainder instruction so this transform is // always done. For XARCH this transform is done if we know that magic // division will be used, in that case this transform allows CSE to // eliminate the redundant div from code like "x = a / 3; y = a % 3;". 
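// For illustration (an assumed example, not part of the original comment):
//   "a % 3" becomes "a - (a / 3) * 3"; once magic-number division lowers
//   "a / 3" to a multiply/shift sequence, CSE can share that quotient
//   between the division and the remainder.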
//
GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
{
    JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree));

    if (tree->OperGet() == GT_MOD)
    {
        tree->SetOper(GT_DIV);
    }
    else if (tree->OperGet() == GT_UMOD)
    {
        tree->SetOper(GT_UDIV);
    }
    else
    {
        noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv");
    }

    var_types type = tree->gtType;

    GenTree* const copyOfNumeratorValue   = fgMakeMultiUse(&tree->gtOp1);
    GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2);
    GenTree* const mul                    = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue);
    GenTree* const sub                    = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul);

    // Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul".
    //
    sub->gtFlags |= GTF_REVERSE_OPS;

#ifdef DEBUG
    sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif

    tree->CheckDivideByConstOptimized(this);

    return sub;
}

//------------------------------------------------------------------------------
// fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree.
//
//
// Arguments:
//    oper  - Operation to check
//
// Return Value:
//    True if the operation can be a root of a bitwise rotation tree; false otherwise.

bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper)
{
    return (oper == GT_OR) || (oper == GT_XOR);
}

//------------------------------------------------------------------------------
// fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return
//    an equivalent GT_ROL or GT_ROR tree; otherwise, return the original tree.
//
// Arguments:
//    tree  - tree to check for a rotation pattern
//
// Return Value:
//    An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise.
//
// Assumption:
//    The input is a GT_OR or a GT_XOR tree.

GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
    //
    // Check for a rotation pattern, e.g.,
    //
    //              OR                      ROL
    //            /    \                   /    \.
    //         LSH      RSZ      ->       x      y
    //        /    \   /    \.
    //       x     AND x     AND
    //            /  \      /   \.
    //           y   31   ADD    31
    //                   /    \.
    //                 NEG     32
    //                  |
    //                  y
    // The patterns recognized:
    // (x << (y & M)) op (x >>> ((-y + N) & M))
    // (x >>> ((-y + N) & M)) op (x << (y & M))
    //
    // (x << y) op (x >>> (-y + N))
    // (x >>> (-y + N)) op (x << y)
    //
    // (x >>> (y & M)) op (x << ((-y + N) & M))
    // (x << ((-y + N) & M)) op (x >>> (y & M))
    //
    // (x >>> y) op (x << (-y + N))
    // (x << (-y + N)) op (x >>> y)
    //
    // (x << c1) op (x >>> c2)
    // (x >>> c1) op (x << c2)
    //
    // where
    // c1 and c2 are const
    // c1 + c2 == bitsize(x)
    // N == bitsize(x)
    // M is const
    // M & (N - 1) == N - 1
    // op is either | or ^

    if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
    {
        // We can't do anything if the tree has assignments, calls, or volatile
        // reads. Note that we allow GTF_EXCEPT side effect since any exceptions
        // thrown by the original tree will be thrown by the transformed tree as well.
        return nullptr;
    }

    genTreeOps oper = tree->OperGet();
    assert(fgOperIsBitwiseRotationRoot(oper));

    // Check if we have an LSH on one side of the OR and an RSZ on the other side.
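    // Illustrative source-level shape (an assumption, not from the original
    // comment): for a 32-bit 'x' and side-effect-free shift sources,
    //   (x << n) | (x >>> (32 - n))   is recognized as   ROL(x, n)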
GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); GenTree* leftShiftTree = nullptr; GenTree* rightShiftTree = nullptr; if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ)) { leftShiftTree = op1; rightShiftTree = op2; } else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH)) { leftShiftTree = op2; rightShiftTree = op1; } else { return nullptr; } // Check if the trees representing the value to shift are identical. // We already checked that there are no side effects above. if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1())) { GenTree* rotatedValue = leftShiftTree->gtGetOp1(); var_types rotatedValueActualType = genActualType(rotatedValue->gtType); ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8; noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64)); GenTree* leftShiftIndex = leftShiftTree->gtGetOp2(); GenTree* rightShiftIndex = rightShiftTree->gtGetOp2(); // The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits // shouldn't be masked for the transformation to be valid. If additional // higher bits are not masked, the transformation is still valid since the result // of MSIL shift instructions is unspecified if the shift amount is greater or equal // than the width of the value being shifted. ssize_t minimalMask = rotatedValueBitSize - 1; ssize_t leftShiftMask = -1; ssize_t rightShiftMask = -1; if ((leftShiftIndex->OperGet() == GT_AND)) { if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI()) { leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; leftShiftIndex = leftShiftIndex->gtGetOp1(); } else { return nullptr; } } if ((rightShiftIndex->OperGet() == GT_AND)) { if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI()) { rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal; rightShiftIndex = rightShiftIndex->gtGetOp1(); } else { return nullptr; } } if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask)) { // The shift index is overmasked, e.g., we have // something like (x << y & 15) or // (x >> (32 - y) & 15 with 32 bit x. // The transformation is not valid. return nullptr; } GenTree* shiftIndexWithAdd = nullptr; GenTree* shiftIndexWithoutAdd = nullptr; genTreeOps rotateOp = GT_NONE; GenTree* rotateIndex = nullptr; if (leftShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = leftShiftIndex; shiftIndexWithoutAdd = rightShiftIndex; rotateOp = GT_ROR; } else if (rightShiftIndex->OperGet() == GT_ADD) { shiftIndexWithAdd = rightShiftIndex; shiftIndexWithoutAdd = leftShiftIndex; rotateOp = GT_ROL; } if (shiftIndexWithAdd != nullptr) { if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI()) { if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize) { if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG) { if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd)) { // We found one of these patterns: // (x << (y & M)) | (x >>> ((-y + N) & M)) // (x << y) | (x >>> (-y + N)) // (x >>> (y & M)) | (x << ((-y + N) & M)) // (x >>> y) | (x << (-y + N)) // where N == bitsize(x), M is const, and // M & (N - 1) == N - 1 CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64)) { // TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86. // GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need // to add helpers for GT_ROL and GT_ROR. 
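                                // (Illustrative note, assumed: on a 32-bit target a variable-count
                                // 64-bit rotate would need a decomposition helper that does not
                                // exist yet, so the rotate is simply not formed here.)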
                                return nullptr;
                            }
#endif
                            rotateIndex = shiftIndexWithoutAdd;
                        }
                    }
                }
            }
        }
        else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
        {
            if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize)
            {
                // We found this pattern:
                // (x << c1) | (x >>> c2)
                // where c1 and c2 are const and c1 + c2 == bitsize(x)
                rotateOp    = GT_ROL;
                rotateIndex = leftShiftIndex;
            }
        }

        if (rotateIndex != nullptr)
        {
            noway_assert(GenTree::OperIsRotate(rotateOp));

            GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT;

            // We can use the same tree only during global morph; reusing the tree in a later morph
            // may invalidate value numbers.
            if (fgGlobalMorph)
            {
                tree->AsOp()->gtOp1 = rotatedValue;
                tree->AsOp()->gtOp2 = rotateIndex;
                tree->ChangeOper(rotateOp);

                unsigned childFlags = 0;
                for (GenTree* op : tree->Operands())
                {
                    childFlags |= (op->gtFlags & GTF_ALL_EFFECT);
                }

                // The parent's flags should be a superset of its operands' flags
                noway_assert((inputTreeEffects & childFlags) == childFlags);
            }
            else
            {
                tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex);
                noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT));
            }

            return tree;
        }
    }

    return nullptr;
}

#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------------
// fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands.
//
// Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap
// operands if the first one is a constant and the second one is not, even for trees which
// end up not being eligible for long multiplication.
//
// Arguments:
//    mul  -  GT_MUL tree to check for a long multiplication opportunity
//
// Return Value:
//    The original tree, with operands possibly swapped, if it is not eligible for long multiplication.
//    Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is.
//
GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul)
{
    assert(mul->OperIs(GT_MUL));
    assert(mul->TypeIs(TYP_LONG));

    GenTree* op1 = mul->gtGetOp1();
    GenTree* op2 = mul->gtGetOp2();

    // "IsValidLongMul" and decomposition do not handle constant op1.
    if (op1->IsIntegralConst())
    {
        std::swap(op1, op2);
        mul->gtOp1 = op1;
        mul->gtOp2 = op2;
    }

    if (!mul->IsValidLongMul())
    {
        return mul;
    }

    // MUL_LONG needs to do the work the casts would have done.
    mul->ClearUnsigned();
    if (op1->IsUnsigned())
    {
        mul->SetUnsigned();
    }

    // "IsValidLongMul" returned "true", so this GT_MUL cannot overflow.
    mul->ClearOverflow();
    mul->Set64RsltMul();

    return fgMorphLongMul(mul);
}

//------------------------------------------------------------------------------
// fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT.
//
// Morphs *only* the operands of casts that compose the long mul to
// avoid them being folded away.
//
// Arguments:
//    mul  -  GT_MUL tree to morph operands of
//
// Return Value:
//    The original tree, with operands morphed and flags propagated.
//
GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul)
{
    INDEBUG(mul->DebugCheckLongMul());

    GenTree* op1 = mul->gtGetOp1();
    GenTree* op2 = mul->gtGetOp2();

    // Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
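    // Illustrative shape (an assumed example): a decomposable long multiply is
    //   MUL.long(CAST.long(int op1), CAST.long(int op2))
    // and must keep this exact shape so decomposition can emit one 32x32->64
    // multiply instead of a helper call.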
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp()); op1->SetAllEffectsFlags(op1->AsCast()->CastOp()); if (op2->OperIs(GT_CAST)) { op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp()); op2->SetAllEffectsFlags(op2->AsCast()->CastOp()); } mul->SetAllEffectsFlags(op1, op2); op1->SetDoNotCSE(); op2->SetDoNotCSE(); return mul; } #endif // !defined(TARGET_64BIT) /***************************************************************************** * * Transform the given tree for code generation and return an equivalent tree. */ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac) { assert(tree); #ifdef DEBUG if (verbose) { if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID) { noway_assert(!"JitBreakMorphTree hit"); } } #endif #ifdef DEBUG int thisMorphNum = 0; if (verbose && treesBeforeAfterMorph) { thisMorphNum = morphNum++; printf("\nfgMorphTree (before %d):\n", thisMorphNum); gtDispTree(tree); } #endif if (fgGlobalMorph) { // Apply any rewrites for implicit byref arguments before morphing the // tree. if (fgMorphImplicitByRefArgs(tree)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum); gtDispTree(tree); } #endif } } /*------------------------------------------------------------------------- * fgMorphTree() can potentially replace a tree with another, and the * caller has to store the return value correctly. * Turn this on to always make copy of "tree" here to shake out * hidden/unupdated references. */ #ifdef DEBUG if (compStressCompile(STRESS_GENERIC_CHECK, 0)) { GenTree* copy; if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL) { copy = gtNewLargeOperNode(GT_ADD, TYP_INT); } else { copy = new (this, GT_CALL) GenTreeCall(TYP_INT); } copy->ReplaceWith(tree, this); #if defined(LATE_DISASM) // GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle()) { copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle; } #endif DEBUG_DESTROY_NODE(tree); tree = copy; } #endif // DEBUG if (fgGlobalMorph) { /* Ensure that we haven't morphed this node already */ assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); /* Before morphing the tree, we try to propagate any active assertions */ if (optLocalAssertionProp) { /* Do we have any active assertions? */ if (optAssertionCount > 0) { GenTree* newTree = tree; while (newTree != nullptr) { tree = newTree; /* newTree is non-Null if we propagated an assertion */ newTree = optAssertionProp(apFull, tree, nullptr, nullptr); } assert(tree != nullptr); } } PREFAST_ASSUME(tree != nullptr); } /* Save the original un-morphed tree for fgMorphTreeDone */ GenTree* oldTree = tree; /* Figure out what kind of a node we have */ unsigned kind = tree->OperKind(); /* Is this a constant node? */ if (tree->OperIsConst()) { tree = fgMorphConst(tree); goto DONE; } /* Is this a leaf node? */ if (kind & GTK_LEAF) { tree = fgMorphLeaf(tree); goto DONE; } /* Is it a 'simple' unary/binary operator? 
*/ if (kind & GTK_SMPOP) { tree = fgMorphSmpOp(tree, mac); goto DONE; } /* See what kind of a special operator we have here */ switch (tree->OperGet()) { case GT_CALL: if (tree->OperMayThrow(this)) { tree->gtFlags |= GTF_EXCEPT; } else { tree->gtFlags &= ~GTF_EXCEPT; } tree = fgMorphCall(tree->AsCall()); break; #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) #if defined(FEATURE_SIMD) case GT_SIMD: #endif #if defined(FEATURE_HW_INTRINSICS) case GT_HWINTRINSIC: #endif tree = fgMorphMultiOp(tree->AsMultiOp()); break; #endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS) case GT_ARR_ELEM: tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj); unsigned dim; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]); } tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT; for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++) { tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT; } if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_ARR_OFFSET: tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset); tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex); tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj); tree->gtFlags &= ~GTF_CALL; tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT; if (fgGlobalMorph) { fgSetRngChkTarget(tree, false); } break; case GT_PHI: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreePhi::Use& use : tree->AsPhi()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT; } break; case GT_FIELD_LIST: tree->gtFlags &= ~GTF_ALL_EFFECT; for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses()) { use.SetNode(fgMorphTree(use.GetNode())); tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT); } break; case GT_CMPXCHG: tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation); tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue); tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand); tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL); tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT; tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT; break; case GT_STORE_DYN_BLK: tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk()); break; default: #ifdef DEBUG gtDispTree(tree); #endif noway_assert(!"unexpected operator"); } DONE: fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum)); return tree; } //------------------------------------------------------------------------ // fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. 
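// For illustration (an assumed example): an assignment to V03 must kill
// both constant assertions like "V03 == 5" and copy assertions like
// "V07 == V03", since V03 may appear on either side of an assertion.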
// void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree)) { /* All dependent assertions are killed here */ ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum)); if (killed) { AssertionIndex index = optAssertionCount; while (killed && (index > 0)) { if (BitVecOps::IsMember(apTraits, killed, index - 1)) { #ifdef DEBUG AssertionDsc* curAssertion = optGetAssertion(index); noway_assert((curAssertion->op1.lcl.lclNum == lclNum) || ((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum))); if (verbose) { printf("\nThe assignment "); printTreeID(tree); printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum); optPrintAssertion(curAssertion); } #endif // Remove this bit from the killed mask BitVecOps::RemoveElemD(apTraits, killed, index - 1); optAssertionRemove(index); } index--; } // killed mask should now be zero noway_assert(BitVecOps::IsEmpty(apTraits, killed)); } } //------------------------------------------------------------------------ // fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum. // // Arguments: // lclNum - The varNum of the lclVar for which we're killing assertions. // tree - (DEBUG only) the tree responsible for killing its assertions. // // Notes: // For structs and struct fields, it will invalidate the children and parent // respectively. // Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar. // void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree)) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvPromoted) { noway_assert(varTypeIsStruct(varDsc)); // Kill the field locals. for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i) { fgKillDependentAssertionsSingle(i DEBUGARG(tree)); } // Kill the struct local itself. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } else if (varDsc->lvIsStructField) { // Kill the field local. fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); // Kill the parent struct. fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree)); } else { fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree)); } } /***************************************************************************** * * This function is called to complete the morphing of a tree node * It should only be called once for each node. * If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated, * to enforce the invariant that each node is only morphed once. * If local assertion prop is enabled the result tree may be replaced * by an equivalent tree. * */ void Compiler::fgMorphTreeDone(GenTree* tree, GenTree* oldTree /* == NULL */ DEBUGARG(int morphNum)) { #ifdef DEBUG if (verbose && treesBeforeAfterMorph) { printf("\nfgMorphTree (after %d):\n", morphNum); gtDispTree(tree); printf(""); // in our logic this causes a flush } #endif if (!fgGlobalMorph) { return; } if ((oldTree != nullptr) && (oldTree != tree)) { /* Ensure that we have morphed this node */ assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!"); #ifdef DEBUG TransferTestDataToNode(oldTree, tree); #endif } else { // Ensure that we haven't morphed this node already assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!"); } if (tree->OperIsConst()) { goto DONE; } if (!optLocalAssertionProp) { goto DONE; } /* Do we have any active assertions? 
*/

    if (optAssertionCount > 0)
    {
        /* Is this an assignment to a local variable */
        GenTreeLclVarCommon* lclVarTree = nullptr;

        // The check below will miss LIR-style assignments.
        //
        // But we shouldn't be running local assertion prop on these,
        // as local prop gets disabled when we run global prop.
        assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));

        // DefinesLocal can return true for some BLK op uses, so
        // check what gets assigned only when we're at an assignment.
        if (tree->OperIs(GT_ASG) && tree->DefinesLocal(this, &lclVarTree))
        {
            unsigned lclNum = lclVarTree->GetLclNum();
            noway_assert(lclNum < lvaCount);
            fgKillDependentAssertions(lclNum DEBUGARG(tree));
        }
    }

    /* If this tree makes a new assertion - make it available */
    optAssertionGen(tree);

DONE:;

#ifdef DEBUG
    /* Mark this node as being morphed */
    tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}

//------------------------------------------------------------------------
// fgFoldConditional: try and fold conditionals and optimize BBJ_COND or
//   BBJ_SWITCH blocks.
//
// Arguments:
//   block - block to examine
//
// Returns:
//   FoldResult indicating what changes were made, if any
//
Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
{
    FoldResult result = FoldResult::FOLD_DID_NOTHING;

    // We don't want to make any code unreachable
    //
    if (opts.OptimizationDisabled())
    {
        return result;
    }

    if (block->bbJumpKind == BBJ_COND)
    {
        noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);

        Statement* lastStmt = block->lastStmt();

        noway_assert(lastStmt->GetNextStmt() == nullptr);

        if (lastStmt->GetRootNode()->gtOper == GT_CALL)
        {
            noway_assert(fgRemoveRestOfBlock);

            // Unconditional throw - transform the basic block into a BBJ_THROW
            //
            fgConvertBBToThrowBB(block);
            result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;

            JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
            JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);

            return result;
        }

        noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE);

        /* Did we fold the conditional */

        noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);

        GenTree* condTree;
        condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
        GenTree* cond;
        cond = condTree->gtEffectiveVal(true);

        if (cond->OperIsConst())
        {
            /* Yupee - we folded the conditional!
             * Remove the conditional statement */

            noway_assert(cond->gtOper == GT_CNS_INT);
            noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));

            if (condTree != cond)
            {
                // Preserve any side effects
                assert(condTree->OperIs(GT_COMMA));
                lastStmt->SetRootNode(condTree);
                result = FoldResult::FOLD_ALTERED_LAST_STMT;
            }
            else
            {
                // no side effects, remove the jump entirely
                fgRemoveStmt(block, lastStmt);
                result = FoldResult::FOLD_REMOVED_LAST_STMT;
            }

            // block is a BBJ_COND that we are folding the conditional for.
            // bTaken is the path that will always be taken from block.
            // bNotTaken is the path that will never be taken from block.
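            // For illustration (an assumed example): folding "JTRUE(1)" turns
            // the block into BBJ_ALWAYS and removes the fall-through edge;
            // folding "JTRUE(0)" turns it into BBJ_NONE and removes the jump
            // edge, as the code below does.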
// BasicBlock* bTaken; BasicBlock* bNotTaken; if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; bTaken = block->bbJumpDest; bNotTaken = block->bbNext; } else { /* Unmark the loop if we are removing a backwards branch */ /* dest block must also be marked as a loop head and */ /* We must be able to reach the backedge block */ if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) && fgReachable(block->bbJumpDest, block)) { optUnmarkLoopBlocks(block->bbJumpDest, block); } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; bTaken = block->bbNext; bNotTaken = block->bbJumpDest; } if (fgHaveValidEdgeWeights) { // We are removing an edge from block to bNotTaken // and we have already computed the edge weights, so // we will try to adjust some of the weights // flowList* edgeTaken = fgGetPredForBlock(bTaken, block); BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block // We examine the taken edge (block -> bTaken) // if block has valid profile weight and bTaken does not we try to adjust bTaken's weight // else if bTaken has valid profile weight and block does not we try to adjust block's weight // We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken // if (block->hasProfileWeight()) { // The edge weights for (block -> bTaken) are 100% of block's weight edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken); if (!bTaken->hasProfileWeight()) { if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight)) { // Update the weight of bTaken bTaken->inheritWeight(block); bUpdated = bTaken; } } } else if (bTaken->hasProfileWeight()) { if (bTaken->countOfInEdges() == 1) { // There is only one in edge to bTaken edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken); // Update the weight of block block->inheritWeight(bTaken); bUpdated = block; } } if (bUpdated != nullptr) { weight_t newMinWeight; weight_t newMaxWeight; flowList* edge; // Now fix the weights of the edges out of 'bUpdated' switch (bUpdated->bbJumpKind) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; case BBJ_COND: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); FALLTHROUGH; case BBJ_ALWAYS: edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated); newMaxWeight = bUpdated->bbWeight; newMinWeight = min(edge->edgeWeightMin(), newMaxWeight); edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext); break; default: // We don't handle BBJ_SWITCH break; } } } /* modify the flow graph */ /* Remove 'block' from the predecessor list of 'bNotTaken' */ fgRemoveRefPred(bNotTaken, block); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif /* if the block was a loop condition we may have to modify * the loop table */ for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* We are only interested in the loop bottom */ if (optLoopTable[loopNum].lpBottom == block) { if (cond->AsIntCon()->gtIconVal == 0) { /* This was a bogus loop (condition always false) * Remove the loop from the table */ optMarkLoopRemoved(loopNum); optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop")); #ifdef DEBUG if (verbose) { printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum, optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum); } #endif } } } } } else if (block->bbJumpKind == BBJ_SWITCH) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt->GetNextStmt() == nullptr); if (lastStmt->GetRootNode()->gtOper == GT_CALL) { noway_assert(fgRemoveRestOfBlock); // Unconditional throw - transform the basic block into a BBJ_THROW // fgConvertBBToThrowBB(block); result = FoldResult::FOLD_CHANGED_CONTROL_FLOW; JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum); JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum); return result; } noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH); /* Did we fold the conditional */ noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1); GenTree* condTree; condTree = lastStmt->GetRootNode()->AsOp()->gtOp1; GenTree* cond; cond = condTree->gtEffectiveVal(true); if (cond->OperIsConst()) { /* Yupee - we folded the conditional! * Remove the conditional statement */ noway_assert(cond->gtOper == GT_CNS_INT); if (condTree != cond) { // Preserve any side effects assert(condTree->OperIs(GT_COMMA)); lastStmt->SetRootNode(condTree); result = FoldResult::FOLD_ALTERED_LAST_STMT; } else { // no side effects, remove the switch entirely fgRemoveStmt(block, lastStmt); result = FoldResult::FOLD_REMOVED_LAST_STMT; } /* modify the flow graph */ /* Find the actual jump target */ unsigned switchVal; switchVal = (unsigned)cond->AsIntCon()->gtIconVal; unsigned jumpCnt; jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jumpTab; jumpTab = block->bbJumpSwt->bbsDstTab; bool foundVal; foundVal = false; for (unsigned val = 0; val < jumpCnt; val++, jumpTab++) { BasicBlock* curJump = *jumpTab; assert(curJump->countOfInEdges() > 0); // If val matches switchVal or we are at the last entry and // we never found the switch value then set the new jump dest if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1))) { if (curJump != block->bbNext) { /* transform the basic block into a BBJ_ALWAYS */ block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = curJump; } else { /* transform the basic block into a BBJ_NONE */ block->bbJumpKind = BBJ_NONE; } foundVal = true; } else { /* Remove 'block' from the predecessor list of 'curJump' */ fgRemoveRefPred(curJump, block); } } assert(foundVal); #ifdef DEBUG if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, block->bbJumpKind == BBJ_ALWAYS ? 
"BBJ_ALWAYS" : "BBJ_NONE"); if (block->bbJumpKind == BBJ_ALWAYS) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } printf("\n"); } #endif } } return result; } //------------------------------------------------------------------------ // fgMorphBlockStmt: morph a single statement in a block. // // Arguments: // block - block containing the statement // stmt - statement to morph // msg - string to identify caller in a dump // // Returns: // true if 'stmt' was removed from the block. // s false if 'stmt' is still in the block (even if other statements were removed). // // Notes: // Can be called anytime, unlike fgMorphStmts() which should only be called once. // bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg)) { assert(block != nullptr); assert(stmt != nullptr); // Reset some ambient state fgRemoveRestOfBlock = false; compCurBB = block; compCurStmt = stmt; GenTree* morph = fgMorphTree(stmt->GetRootNode()); // Bug 1106830 - During the CSE phase we can't just remove // morph->AsOp()->gtOp2 as it could contain CSE expressions. // This leads to a noway_assert in OptCSE.cpp when // searching for the removed CSE ref. (using gtFindLink) // if (!optValnumCSE_phase) { // Check for morph as a GT_COMMA with an unconditional throw if (fgIsCommaThrow(morph, true)) { #ifdef DEBUG if (verbose) { printf("Folding a top-level fgIsCommaThrow stmt\n"); printf("Removing op2 as unreachable:\n"); gtDispTree(morph->AsOp()->gtOp2); printf("\n"); } #endif // Use the call as the new stmt morph = morph->AsOp()->gtOp1; noway_assert(morph->gtOper == GT_CALL); } // we can get a throw as a statement root if (fgIsThrow(morph)) { #ifdef DEBUG if (verbose) { printf("We have a top-level fgIsThrow stmt\n"); printf("Removing the rest of block as unreachable:\n"); } #endif noway_assert((morph->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } } stmt->SetRootNode(morph); // Can the entire tree be removed? bool removedStmt = false; // Defer removing statements during CSE so we don't inadvertently remove any CSE defs. if (!optValnumCSE_phase) { removedStmt = fgCheckRemoveStmt(block, stmt); } // Or this is the last statement of a conditional branch that was just folded? if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock) { FoldResult const fr = fgFoldConditional(block); removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT); } if (!removedStmt) { // Have to re-do the evaluation order since for example some later code does not expect constants as op1 gtSetStmtInfo(stmt); // Have to re-link the nodes for this statement fgSetStmtSeq(stmt); } #ifdef DEBUG if (verbose) { printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed")); gtDispTree(morph); printf("\n"); } #endif if (fgRemoveRestOfBlock) { // Remove the rest of the stmts in the block for (Statement* removeStmt : StatementList(stmt->GetNextStmt())) { fgRemoveStmt(block, removeStmt); } // The rest of block has been removed and we will always throw an exception. // // For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE. // We should not convert it to a ThrowBB. 
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0)) { // Convert block to a throw bb fgConvertBBToThrowBB(block); } #ifdef DEBUG if (verbose) { printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum); } #endif fgRemoveRestOfBlock = false; } return removedStmt; } /***************************************************************************** * * Morph the statements of the given block. * This function should be called just once for a block. Use fgMorphBlockStmt() * for reentrant calls. */ void Compiler::fgMorphStmts(BasicBlock* block) { fgRemoveRestOfBlock = false; fgCurrentlyInUseArgTemps = hashBv::Create(this); for (Statement* const stmt : block->Statements()) { if (fgRemoveRestOfBlock) { fgRemoveStmt(block, stmt); continue; } #ifdef FEATURE_SIMD if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT && stmt->GetRootNode()->OperGet() == GT_ASG) { fgMorphCombineSIMDFieldAssignments(block, stmt); } #endif fgMorphStmt = stmt; compCurStmt = stmt; GenTree* oldTree = stmt->GetRootNode(); #ifdef DEBUG unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0); if (verbose) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID()); gtDispTree(oldTree); } #endif /* Morph this statement tree */ GenTree* morphedTree = fgMorphTree(oldTree); // mark any outgoing arg temps as free so we can reuse them in the next statement. fgCurrentlyInUseArgTemps->ZeroAll(); // Has fgMorphStmt been sneakily changed ? if ((stmt->GetRootNode() != oldTree) || (block != compCurBB)) { if (stmt->GetRootNode() != oldTree) { /* This must be tailcall. Ignore 'morphedTree' and carry on with the tail-call node */ morphedTree = stmt->GetRootNode(); } else { /* This must be a tailcall that caused a GCPoll to get injected. We haven't actually morphed the call yet but the flag still got set, clear it here... */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED; #endif } noway_assert(compTailCallUsed); noway_assert(morphedTree->gtOper == GT_CALL); GenTreeCall* call = morphedTree->AsCall(); // Could be // - a fast call made as jmp in which case block will be ending with // BBJ_RETURN (as we need epilog) and marked as containing a jmp. // - a tailcall dispatched via JIT helper, on x86, in which case // block will be ending with BBJ_THROW. // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); } #ifdef DEBUG if (compStressCompile(STRESS_CLONE_EXPR, 30)) { // Clone all the trees to stress gtCloneExpr() if (verbose) { printf("\nfgMorphTree (stressClone from):\n"); gtDispTree(morphedTree); } morphedTree = gtCloneExpr(morphedTree); noway_assert(morphedTree != nullptr); if (verbose) { printf("\nfgMorphTree (stressClone to):\n"); gtDispTree(morphedTree); } } /* If the hash value changes. 
we modified the tree during morphing */ if (verbose) { unsigned newHash = gtHashValue(morphedTree); if (newHash != oldHash) { printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID()); gtDispTree(morphedTree); } } #endif /* Check for morphedTree as a GT_COMMA with an unconditional throw */ if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true)) { /* Use the call as the new stmt */ morphedTree = morphedTree->AsOp()->gtOp1; noway_assert(morphedTree->gtOper == GT_CALL); noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0); fgRemoveRestOfBlock = true; } stmt->SetRootNode(morphedTree); if (fgRemoveRestOfBlock) { continue; } /* Has the statement been optimized away */ if (fgCheckRemoveStmt(block, stmt)) { continue; } /* Check if this block ends with a conditional branch that can be folded */ if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING) { continue; } if (ehBlockHasExnFlowDsc(block)) { continue; } } if (fgRemoveRestOfBlock) { if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); Statement* lastStmt = block->lastStmt(); noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; if (op1->OperIsCompare()) { /* Unmark the comparison node with GTF_RELOP_JMP_USED */ op1->gtFlags &= ~GTF_RELOP_JMP_USED; } lastStmt->SetRootNode(fgMorphTree(op1)); } } /* Mark block as a BBJ_THROW block */ fgConvertBBToThrowBB(block); } #if FEATURE_FASTTAILCALL GenTree* recursiveTailCall = nullptr; if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall)) { fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall()); } #endif // Reset this back so that it doesn't leak out impacting other blocks fgRemoveRestOfBlock = false; } /***************************************************************************** * * Morph the blocks of the method. * Returns true if the basic block list is modified. * This function should be called just once. */ void Compiler::fgMorphBlocks() { #ifdef DEBUG if (verbose) { printf("\n*************** In fgMorphBlocks()\n"); } #endif /* Since fgMorphTree can be called after various optimizations to re-arrange * the nodes we need a global flag to signal if we are during the one-pass * global morphing */ fgGlobalMorph = true; // // Local assertion prop is enabled if we are optimized // optLocalAssertionProp = opts.OptimizationEnabled(); if (optLocalAssertionProp) { // // Initialize for local assertion prop // optAssertionInit(true); } if (!compEnregLocals()) { // Morph is checking if lvDoNotEnregister is already set for some optimizations. // If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`) // then we already know that we won't enregister any locals and it is better to set // this flag before we start reading it. // The main reason why this flag is not set is that we are running in minOpts. 
lvSetMinOptsDoNotEnreg(); } /*------------------------------------------------------------------------- * Process all basic blocks in the function */ BasicBlock* block = fgFirstBB; noway_assert(block); do { #ifdef DEBUG if (verbose) { printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName); } #endif if (optLocalAssertionProp) { // // Clear out any currently recorded assertion candidates // before processing each basic block, // also we must handle QMARK-COLON specially // optAssertionReset(0); } // Make the current basic block address available globally. compCurBB = block; // Process all statement trees in the basic block. fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { fgMergeBlockReturn(block); } } block = block->bbNext; } while (block != nullptr); // We are done with the global morphing phase fgGlobalMorph = false; compCurBB = nullptr; // Under OSR, we no longer need to specially protect the original method entry // if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED)) { JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum); assert(fgEntryBB->bbRefs > 0); fgEntryBB->bbRefs--; // We don't need to remember this block anymore. fgEntryBB = nullptr; } #ifdef DEBUG if (verboseTrees) { fgDispBasicBlocks(true); } #endif } //------------------------------------------------------------------------ // fgMergeBlockReturn: assign the block return value (if any) into the single return temp // and branch to the single return block. // // Arguments: // block - the block to process. // // Notes: // A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN. // For example a method returning void could have an empty block with jump kind BBJ_RETURN. // Such blocks do materialize as part of in-lining. // // A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN. // It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC. // For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal // is BAD_VAR_NUM. // void Compiler::fgMergeBlockReturn(BasicBlock* block) { assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. Statement* lastStmt = block->lastStmt(); GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr; if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0)) { // This return was generated during epilog merging, so leave it alone } else { // We'll jump to the genReturnBB. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_X86) if (info.compFlags & CORINFO_FLG_SYNCH) { fgConvertSyncReturnToLeave(block); } else #endif // !TARGET_X86 { block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; } if (genReturnLocal != BAD_VAR_NUM) { // replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal. // Method must be returning a value other than TYP_VOID. 
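            // For illustration (an assumed example): "RETURN(expr)" here becomes
            // "ASG(genReturnLocal, expr)" followed by a jump to the shared
            // genReturnBB, which holds the method's single real GT_RETURN.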
noway_assert(compMethodHasRetVal()); // This block must be ending with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); noway_assert(ret != nullptr); // GT_RETURN must have non-null operand as the method is returning the value assigned to // genReturnLocal noway_assert(ret->OperGet() == GT_RETURN); noway_assert(ret->gtGetOp1() != nullptr); Statement* pAfterStatement = lastStmt; const DebugInfo& di = lastStmt->GetDebugInfo(); GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block); if (tree->OperIsCopyBlkOp()) { tree = fgMorphCopyBlock(tree); } else if (tree->OperIsInitBlkOp()) { tree = fgMorphInitBlock(tree); } if (pAfterStatement == lastStmt) { lastStmt->SetRootNode(tree); } else { // gtNewTempAssign inserted additional statements after last fgRemoveStmt(block, lastStmt); Statement* newStmt = gtNewStmt(tree, di); fgInsertStmtAfter(block, pAfterStatement, newStmt); lastStmt = newStmt; } } else if (ret != nullptr && ret->OperGet() == GT_RETURN) { // This block ends with a GT_RETURN noway_assert(lastStmt != nullptr); noway_assert(lastStmt->GetNextStmt() == nullptr); // Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn // block noway_assert(ret->TypeGet() == TYP_VOID); noway_assert(ret->gtGetOp1() == nullptr); fgRemoveStmt(block, lastStmt); } JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum); DISPBLOCK(block); if (block->hasProfileWeight()) { weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT; weight_t const newWeight = oldWeight + block->bbWeight; JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight, block->bbNum, genReturnBB->bbNum); genReturnBB->setBBProfileWeight(newWeight); DISPBLOCK(genReturnBB); } } } /***************************************************************************** * * Make some decisions about the kind of code to generate. */ void Compiler::fgSetOptions() { #ifdef DEBUG /* Should we force fully interruptible code ? */ if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30)) { noway_assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); } #endif if (opts.compDbgCode) { assert(!codeGen->isGCTypeFixed()); SetInterruptible(true); // debugging is easier this way ... } /* Assume we won't need an explicit stack frame if this is allowed */ if (compLocallocUsed) { codeGen->setFramePointerRequired(true); } #ifdef TARGET_X86 if (compTailCallUsed) codeGen->setFramePointerRequired(true); #endif // TARGET_X86 if (!opts.genFPopt) { codeGen->setFramePointerRequired(true); } // Assert that the EH table has been initialized by now. Note that // compHndBBtabAllocCount never decreases; it is a high-water mark // of table allocation. In contrast, compHndBBtabCount does shrink // if we delete a dead EH region, and if it shrinks to zero, the // table pointer compHndBBtab is unreliable. assert(compHndBBtabAllocCount >= info.compXcptnsCount); #ifdef TARGET_X86 // Note: this case, and the !X86 case below, should both use the // !X86 path. This would require a few more changes for X86 to use // compHndBBtabCount (the current number of EH clauses) instead of // info.compXcptnsCount (the number of EH clauses in IL), such as // in ehNeedsShadowSPslots(). 
This is because sometimes the IL has // an EH clause that we delete as statically dead code before we // get here, leaving no EH clauses left, and thus no requirement // to use a frame pointer because of EH. But until all the code uses // the same test, leave info.compXcptnsCount here. if (info.compXcptnsCount > 0) { codeGen->setFramePointerRequiredEH(true); } #else // !TARGET_X86 if (compHndBBtabCount > 0) { codeGen->setFramePointerRequiredEH(true); } #endif // TARGET_X86 #ifdef UNIX_X86_ABI if (info.compXcptnsCount > 0) { assert(!codeGen->isGCTypeFixed()); // Enforce fully interruptible codegen for funclet unwinding SetInterruptible(true); } #endif // UNIX_X86_ABI if (compMethodRequiresPInvokeFrame()) { codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame } if (info.compPublishStubParam) { codeGen->setFramePointerRequiredGCInfo(true); } if (compIsProfilerHookNeeded()) { codeGen->setFramePointerRequired(true); } if (info.compIsVarArgs) { // Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative. codeGen->setFramePointerRequiredGCInfo(true); } if (lvaReportParamTypeArg()) { codeGen->setFramePointerRequiredGCInfo(true); } // printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not"); } /*****************************************************************************/ GenTree* Compiler::fgInitThisClass() { noway_assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); if (!kind.needsRuntimeLookup) { return fgGetSharedCCtor(info.compClassHnd); } else { #ifdef FEATURE_READYTORUN // Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR. if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI)) { CORINFO_RESOLVED_TOKEN resolvedToken; memset(&resolvedToken, 0, sizeof(resolvedToken)); // We are in a shared method body, but maybe we don't need a runtime lookup after all. // This covers the case of a generic method on a non-generic type. if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST)) { resolvedToken.hClass = info.compClassHnd; return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); } // We need a runtime lookup. GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); // CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static // base of the class that owns the method being compiled". If we're in this method, it means we're not // inlining and there's no ambiguity. return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF, gtNewCallArgs(ctxTree), &kind); } #endif // Collectible types requires that for shared generic code, if we use the generic context paramter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) 
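        // For illustration (an assumed example): under CORINFO_LOOKUP_THISOBJ the
        // initialization helper receives this object's method table together with
        // the method handle, so CORINFO_HELP_INITINSTCLASS can find the exact
        // class to initialize.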
lvaGenericsContextInUse = true; switch (kind.runtimeLookupKind) { case CORINFO_LOOKUP_THISOBJ: { // This code takes a this pointer; but we need to pass the static method desc to get the right point in // the hierarchy GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF); vtTree->gtFlags |= GTF_VAR_CONTEXT; // Vtable pointer of this object vtTree = gtNewMethodTableLookup(vtTree); GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd); return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd)); } case CORINFO_LOOKUP_CLASSPARAM: { GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); vtTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree)); } case CORINFO_LOOKUP_METHODPARAM: { GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); methHndTree->gtFlags |= GTF_VAR_CONTEXT; return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(gtNewIconNode(0), methHndTree)); } default: noway_assert(!"Unknown LOOKUP_KIND"); UNREACHABLE(); } } } #ifdef DEBUG /***************************************************************************** * * Tree walk callback to make sure no GT_QMARK nodes are present in the tree, * except for the allowed ? 1 : 0; pattern. */ Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data) { if ((*tree)->OperGet() == GT_QMARK) { fgCheckQmarkAllowedForm(*tree); } return WALK_CONTINUE; } void Compiler::fgCheckQmarkAllowedForm(GenTree* tree) { assert(tree->OperGet() == GT_QMARK); assert(!"Qmarks beyond morph disallowed."); } /***************************************************************************** * * Verify that the importer has created GT_QMARK nodes in a way we can * process them. The following is allowed: * * 1. A top level qmark. Top level qmark is of the form: * a) (bool) ? (void) : (void) OR * b) V0N = (bool) ? (type) : (type) * * 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child * of either op1 of colon or op2 of colon but not a child of any other * operator. */ void Compiler::fgPreExpandQmarkChecks(GenTree* expr) { GenTree* topQmark = fgGetTopLevelQmark(expr); // If the top level Qmark is null, then scan the tree to make sure // there are no qmarks within it. if (topQmark == nullptr) { fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } else { // We could probably expand the cond node also, but don't think the extra effort is necessary, // so let's just assert the cond node of a top level qmark doesn't have further top level qmarks. fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1); fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2); } } #endif // DEBUG /***************************************************************************** * * Get the top level GT_QMARK node in a given "expr", return NULL if such a * node is not present. If the top level GT_QMARK node is assigned to a * GT_LCL_VAR, then return the lcl node in ppDst. 
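 * For illustration (an assumed example): for "V05 = cond ? x : y" the QMARK
 * node is returned and *ppDst receives the V05 GT_LCL_VAR node.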
* */ GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */) { if (ppDst != nullptr) { *ppDst = nullptr; } GenTree* topQmark = nullptr; if (expr->gtOper == GT_QMARK) { topQmark = expr; } else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK && expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { topQmark = expr->AsOp()->gtOp2; if (ppDst != nullptr) { *ppDst = expr->AsOp()->gtOp1; } } return topQmark; } /********************************************************************************* * * For a castclass helper call, * Importer creates the following tree: * tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper()); * * This method splits the qmark expression created by the importer into the * following blocks: (block, asg, cond1, cond2, helper, remainder) * Notice that op1 is the result for both the conditions. So we coalesce these * assignments into a single block instead of two blocks resulting a nested diamond. * * +---------->-----------+ * | | | * ^ ^ v * | | | * block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder * * We expect to achieve the following codegen: * mov rsi, rdx tmp = op1 // asgBlock * test rsi, rsi goto skip if tmp == null ? // cond1Block * je SKIP * mov rcx, 0x76543210 cns = op2 // cond2Block * cmp qword ptr [rsi], rcx goto skip if *tmp == op2 * je SKIP * call CORINFO_HELP_CHKCASTCLASS_SPECIAL tmp = helper(cns, tmp) // helperBlock * mov rsi, rax * SKIP: // remainderBlock * tmp has the result. * */ void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt) { #ifdef DEBUG if (verbose) { printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum); fgDispBasicBlocks(block, block, true); } #endif // DEBUG GenTree* expr = stmt->GetRootNode(); GenTree* dst = nullptr; GenTree* qmark = fgGetTopLevelQmark(expr, &dst); noway_assert(dst != nullptr); assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF); // Get cond, true, false exprs for the qmark. GenTree* condExpr = qmark->gtGetOp1(); GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode(); GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode(); // Get cond, true, false exprs for the nested qmark. GenTree* nestedQmark = falseExpr; GenTree* cond2Expr; GenTree* true2Expr; GenTree* false2Expr; if (nestedQmark->gtOper == GT_QMARK) { cond2Expr = nestedQmark->gtGetOp1(); true2Expr = nestedQmark->gtGetOp2()->AsColon()->ThenNode(); false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode(); } else { // This is a rare case that arises when we are doing minopts and encounter isinst of null // gtFoldExpr was still is able to optimize away part of the tree (but not all). // That means it does not match our pattern. // Rather than write code to handle this case, just fake up some nodes to make it match the common // case. Synthesize a comparison that is always true, and for the result-on-true, use the // entire subtree we expected to be the nested question op. cond2Expr = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL)); true2Expr = nestedQmark; false2Expr = gtNewIconNode(0, TYP_I_IMPL); } assert(false2Expr->OperGet() == trueExpr->OperGet()); // Create the chain of blocks. See method header comment. // The order of blocks after this is the following: // block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... 
remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true); BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true); BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true); remainderBlock->bbFlags |= propagateFlags; // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. if ((block->bbFlags & BBF_INTERNAL) == 0) { helperBlock->bbFlags &= ~BBF_INTERNAL; cond2Block->bbFlags &= ~BBF_INTERNAL; cond1Block->bbFlags &= ~BBF_INTERNAL; asgBlock->bbFlags &= ~BBF_INTERNAL; helperBlock->bbFlags |= BBF_IMPORTED; cond2Block->bbFlags |= BBF_IMPORTED; cond1Block->bbFlags |= BBF_IMPORTED; asgBlock->bbFlags |= BBF_IMPORTED; } // Chain the flow correctly. fgAddRefPred(asgBlock, block); fgAddRefPred(cond1Block, asgBlock); fgAddRefPred(cond2Block, cond1Block); fgAddRefPred(helperBlock, cond2Block); fgAddRefPred(remainderBlock, helperBlock); fgAddRefPred(remainderBlock, cond1Block); fgAddRefPred(remainderBlock, cond2Block); cond1Block->bbJumpDest = remainderBlock; cond2Block->bbJumpDest = remainderBlock; // Set the weights; some are guesses. asgBlock->inheritWeight(block); cond1Block->inheritWeight(block); cond2Block->inheritWeightPercentage(cond1Block, 50); helperBlock->inheritWeightPercentage(cond2Block, 50); // Append cond1 as JTRUE to cond1Block GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond1Block, jmpStmt); // Append cond2 as JTRUE to cond2Block jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr); jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(cond2Block, jmpStmt); // AsgBlock should get tmp = op1 assignment. trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr); Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(asgBlock, trueStmt); // Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper. gtReverseCond(cond2Expr); GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr); Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(helperBlock, helperStmt); // Finally remove the nested qmark stmt. fgRemoveStmt(block, stmt); if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN)) { fgConvertBBToThrowBB(helperBlock); } #ifdef DEBUG if (verbose) { printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand a statement with a top level qmark node. 
There are three cases, based * on whether the qmark has both "true" and "false" arms, or just one of them. * * S0; * C ? T : F; * S1; * * Generates ===> * * bbj_always * +---->------+ * false | | * S0 -->-- ~C -->-- T F -->-- S1 * | | * +--->--------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? T : NOP; * S1; * * Generates ===> * * false * S0 -->-- ~C -->-- T -->-- S1 * | | * +-->-------------+ * bbj_cond(true) * * ----------------------------------------- * * S0; * C ? NOP : F; * S1; * * Generates ===> * * false * S0 -->-- C -->-- F -->-- S1 * | | * +-->------------+ * bbj_cond(true) * * If the qmark assigns to a variable, then create tmps for "then" * and "else" results and assign the temp to the variable as a writeback step. */ void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt) { GenTree* expr = stmt->GetRootNode(); // Retrieve the Qmark node to be expanded. GenTree* dst = nullptr; GenTree* qmark = fgGetTopLevelQmark(expr, &dst); if (qmark == nullptr) { return; } if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF) { fgExpandQmarkForCastInstOf(block, stmt); return; } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum); fgDispBasicBlocks(block, block, true); } #endif // DEBUG // Retrieve the operands. GenTree* condExpr = qmark->gtGetOp1(); GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode(); GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode(); assert(!varTypeIsFloating(condExpr->TypeGet())); bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP); bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP); assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark! // Create remainder, cond and "else" blocks. After this, the blocks are in this order: // block ... condBlock ... elseBlock ... remainderBlock // // We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock', // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently only do this only // for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely // remainderBlock will still be GC safe. BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT; BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt); fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock. BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true); BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true); // These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter). // If they're not internal, mark them as imported to avoid asserts about un-imported blocks. 
if ((block->bbFlags & BBF_INTERNAL) == 0) { condBlock->bbFlags &= ~BBF_INTERNAL; elseBlock->bbFlags &= ~BBF_INTERNAL; condBlock->bbFlags |= BBF_IMPORTED; elseBlock->bbFlags |= BBF_IMPORTED; } remainderBlock->bbFlags |= propagateFlags; condBlock->inheritWeight(block); fgAddRefPred(condBlock, block); fgAddRefPred(elseBlock, condBlock); fgAddRefPred(remainderBlock, elseBlock); BasicBlock* thenBlock = nullptr; if (hasTrueExpr && hasFalseExpr) { // bbj_always // +---->------+ // false | | // S0 -->-- ~C -->-- T F -->-- S1 // | | // +--->--------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = elseBlock; thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true); thenBlock->bbJumpDest = remainderBlock; if ((block->bbFlags & BBF_INTERNAL) == 0) { thenBlock->bbFlags &= ~BBF_INTERNAL; thenBlock->bbFlags |= BBF_IMPORTED; } fgAddRefPred(thenBlock, condBlock); fgAddRefPred(remainderBlock, thenBlock); thenBlock->inheritWeightPercentage(condBlock, 50); elseBlock->inheritWeightPercentage(condBlock, 50); } else if (hasTrueExpr) { // false // S0 -->-- ~C -->-- T -->-- S1 // | | // +-->-------------+ // bbj_cond(true) // gtReverseCond(condExpr); condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); // Since we have no false expr, use the one we'd already created. thenBlock = elseBlock; elseBlock = nullptr; thenBlock->inheritWeightPercentage(condBlock, 50); } else if (hasFalseExpr) { // false // S0 -->-- C -->-- F -->-- S1 // | | // +-->------------+ // bbj_cond(true) // condBlock->bbJumpDest = remainderBlock; fgAddRefPred(remainderBlock, condBlock); elseBlock->inheritWeightPercentage(condBlock, 50); } GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1()); Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo()); fgInsertStmtAtEnd(condBlock, jmpStmt); // Remove the original qmark statement. fgRemoveStmt(block, stmt); // Since we have top level qmarks, we either have a dst for it in which case // we need to create tmps for true and falseExprs, else just don't bother // assigning. unsigned lclNum = BAD_VAR_NUM; if (dst != nullptr) { assert(dst->gtOper == GT_LCL_VAR); lclNum = dst->AsLclVar()->GetLclNum(); } else { assert(qmark->TypeGet() == TYP_VOID); } if (hasTrueExpr) { if (dst != nullptr) { trueExpr = gtNewTempAssign(lclNum, trueExpr); } Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(thenBlock, trueStmt); } // Assign the falseExpr into the dst or tmp, insert in elseBlock if (hasFalseExpr) { if (dst != nullptr) { falseExpr = gtNewTempAssign(lclNum, falseExpr); } Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo()); fgInsertStmtAtEnd(elseBlock, falseStmt); } #ifdef DEBUG if (verbose) { printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum); fgDispBasicBlocks(block, remainderBlock, true); } #endif // DEBUG } /***************************************************************************** * * Expand GT_QMARK nodes from the flow graph into basic blocks. * */ void Compiler::fgExpandQmarkNodes() { if (compQmarkUsed) { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); #ifdef DEBUG fgPreExpandQmarkChecks(expr); #endif fgExpandQmarkStmt(block, stmt); } } #ifdef DEBUG fgPostExpandQmarkChecks(); #endif } compQmarkRationalized = true; } #ifdef DEBUG /***************************************************************************** * * Make sure we don't have any more GT_QMARK nodes. 
* */ void Compiler::fgPostExpandQmarkChecks() { for (BasicBlock* const block : Blocks()) { for (Statement* const stmt : block->Statements()) { GenTree* expr = stmt->GetRootNode(); fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr); } } } #endif /***************************************************************************** * * Promoting struct locals */ void Compiler::fgPromoteStructs() { #ifdef DEBUG if (verbose) { printf("*************** In fgPromoteStructs()\n"); } #endif // DEBUG if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE)) { JITDUMP(" promotion opt flag not enabled\n"); return; } if (fgNoStructPromotion) { JITDUMP(" promotion disabled by JitNoStructPromotion\n"); return; } #if 0 // The code in this #if has been useful in debugging struct promotion issues, by // enabling selective enablement of the struct promotion optimization according to // method hash. #ifdef DEBUG unsigned methHash = info.compMethodHash(); char* lostr = getenv("structpromohashlo"); unsigned methHashLo = 0; if (lostr != NULL) { sscanf_s(lostr, "%x", &methHashLo); } char* histr = getenv("structpromohashhi"); unsigned methHashHi = UINT32_MAX; if (histr != NULL) { sscanf_s(histr, "%x", &methHashHi); } if (methHash < methHashLo || methHash > methHashHi) { return; } else { printf("Promoting structs for method %s, hash = 0x%x.\n", info.compFullName, info.compMethodHash()); printf(""); // in our logic this causes a flush } #endif // DEBUG #endif // 0 if (info.compIsVarArgs) { JITDUMP(" promotion disabled because of varargs\n"); return; } #ifdef DEBUG if (verbose) { printf("\nlvaTable before fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG // The lvaTable might grow as we grab temps. Make a local copy here. unsigned startLvaCount = lvaCount; // // Loop through the original lvaTable. Looking for struct locals to be promoted. // lvaStructPromotionInfo structPromotionInfo; bool tooManyLocalsReported = false; // Clear the structPromotionHelper, since it is used during inlining, at which point it // may be conservative about looking up SIMD info. // We don't want to preserve those conservative decisions for the actual struct promotion. structPromotionHelper->Clear(); for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++) { // Whether this var got promoted bool promotedVar = false; LclVarDsc* varDsc = lvaGetDesc(lclNum); // If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote // its fields. Instead, we will attempt to enregister the entire struct. if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc))) { varDsc->lvRegStruct = true; } // Don't promote if we have reached the tracking limit. else if (lvaHaveManyLocals()) { // Print the message first time when we detected this condition if (!tooManyLocalsReported) { JITDUMP("Stopped promoting struct fields, due to too many locals.\n"); } tooManyLocalsReported = true; } else if (varTypeIsStruct(varDsc)) { assert(structPromotionHelper != nullptr); promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum); } if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed) { // Even if we have not used this in a SIMD intrinsic, if it is not being promoted, // we will treat it as a reg struct. 
varDsc->lvRegStruct = true; } } #ifdef DEBUG if (verbose) { printf("\nlvaTable after fgPromoteStructs\n"); lvaTableDump(); } #endif // DEBUG } void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_FIELD); GenTreeField* field = tree->AsField(); GenTree* objRef = field->GetFldObj(); GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr; noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))); /* Is this an instance data member? */ if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)) { unsigned lclNum = obj->AsLclVarCommon()->GetLclNum(); const LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(obj)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = field->gtFldOffset; unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); if (fieldLclIndex == BAD_VAR_NUM) { // Access a promoted struct's field with an offset that doesn't correspond to any field. // It can happen if the struct was cast to another struct with different offsets. return; } const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex); var_types fieldType = fieldDsc->TypeGet(); assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type. if (tree->TypeGet() != fieldType) { if (tree->TypeGet() != TYP_STRUCT) { // This is going to be an incorrect instruction promotion. // For example when we try to read int as long. return; } if (field->gtFldHnd != fieldDsc->lvFieldHnd) { CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr; CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass); CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass); if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass) { // Access the promoted field with a different class handle, can't check that types match. return; } // Access the promoted field as a field of a non-promoted struct with the same class handle. } else { // As we already checked this above, we must have a tree with a TYP_STRUCT type // assert(tree->TypeGet() == TYP_STRUCT); // The field tree accesses it as a struct, but the promoted LCL_VAR field // says that it has another type. This happens when struct promotion unwraps // a single field struct to get to its ultimate type. // // Note that currently, we cannot have a promoted LCL_VAR field with a struct type. // // This mismatch in types can lead to problems for some parent node type like GT_RETURN. // So we check the parent node and only allow this optimization when we have // a GT_ADDR or a GT_ASG. // // Note that for a GT_ASG we have to do some additional work, // see below after the SetOper(GT_LCL_VAR) // if (!parent->OperIs(GT_ADDR, GT_ASG)) { // Don't transform other operations such as GT_RETURN // return; } #ifdef DEBUG // This is an additional DEBUG-only sanity check // assert(structPromotionHelper != nullptr); structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType); #endif // DEBUG } } tree->SetOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(fieldLclIndex); tree->gtType = fieldType; tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`. 
if (parent->gtOper == GT_ASG) { // If we are changing the left side of an assignment, we need to set // these two flags: // if (parent->AsOp()->gtOp1 == tree) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } // Promotion of struct containing struct fields where the field // is a struct with a single pointer sized scalar type field: in // this case struct promotion uses the type of the underlying // scalar field as the type of struct field instead of recursively // promoting. This can lead to a case where we have a block-asgn // with its RHS replaced with a scalar type. Mark RHS value as // DONT_CSE so that assertion prop will not do const propagation. // The reason this is required is that if RHS of a block-asg is a // constant, then it is interpreted as init-block incorrectly. // // TODO - This can also be avoided if we implement recursive struct // promotion, tracked by #10019. if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree)) { tree->gtFlags |= GTF_DONT_CSE; } } #ifdef DEBUG if (verbose) { printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex); } #endif // DEBUG } } else { // Normed struct // A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if // the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8 // bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However, // there is one extremely rare case where that won't be true. An enum type is a special value type // that contains exactly one element of a primitive integer type (that, for CLS programs is named // "value__"). The VM tells us that a local var of that enum type is the primitive type of the // enum's single field. It turns out that it is legal for IL to access this field using ldflda or // ldfld. For example: // // .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum // { // .field public specialname rtspecialname int16 value__ // .field public static literal valuetype mynamespace.e_t one = int16(0x0000) // } // .method public hidebysig static void Main() cil managed // { // .locals init (valuetype mynamespace.e_t V_0) // ... // ldloca.s V_0 // ldflda int16 mynamespace.e_t::value__ // ... // } // // Normally, compilers will not generate the ldflda, since it is superfluous. // // In the example, the lclVar is short, but the JIT promotes all trees using this local to the // "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type // mismatch like this, don't do this morphing. The local var may end up getting marked as // address taken, and the appropriate SHORT load will be done from memory in that case. 
if (tree->TypeGet() == obj->TypeGet()) { tree->ChangeOper(GT_LCL_VAR); tree->AsLclVarCommon()->SetLclNum(lclNum); tree->gtFlags &= GTF_NODE_MASK; if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } #ifdef DEBUG if (verbose) { printf("Replacing the field in normed struct with local var V%02u\n", lclNum); } #endif // DEBUG } } } } void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent) { noway_assert(tree->OperGet() == GT_LCL_FLD); unsigned lclNum = tree->AsLclFld()->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varTypeIsStruct(varDsc)) { if (varDsc->lvPromoted) { // Promoted struct unsigned fldOffset = tree->AsLclFld()->GetLclOffs(); unsigned fieldLclIndex = 0; LclVarDsc* fldVarDsc = nullptr; if (fldOffset != BAD_VAR_NUM) { fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset); noway_assert(fieldLclIndex != BAD_VAR_NUM); fldVarDsc = lvaGetDesc(fieldLclIndex); } var_types treeType = tree->TypeGet(); var_types fieldType = fldVarDsc->TypeGet(); if (fldOffset != BAD_VAR_NUM && ((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1))) { // There is an existing sub-field we can use. tree->AsLclFld()->SetLclNum(fieldLclIndex); // The field must be an enregisterable type; otherwise it would not be a promoted field. // The tree type may not match, e.g. for return types that have been morphed, but both // must be enregisterable types. assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType)); tree->ChangeOper(GT_LCL_VAR); assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex); tree->gtType = fldVarDsc->TypeGet(); if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree)) { tree->gtFlags |= GTF_VAR_DEF; tree->gtFlags |= GTF_DONT_CSE; } JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex); } else { // There is no existing field that has all the parts that we need // So we must ensure that the struct lives in memory. lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField)); #ifdef DEBUG // We can't convert this guy to a float because he really does have his // address taken.. varDsc->lvKeepType = 1; #endif // DEBUG } } else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc))) { assert(tree->AsLclFld()->GetLclOffs() == 0); tree->gtType = varDsc->TypeGet(); tree->ChangeOper(GT_LCL_VAR); JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum); } } } //------------------------------------------------------------------------ // fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs void Compiler::fgResetImplicitByRefRefCount() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgResetImplicitByRefRefCount()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (varDsc->lvIsImplicitByRef) { // Clear the ref count field; fgMarkAddressTakenLocals will increment it per // appearance of implicit-by-ref param so that call arg morphing can do an // optimization for single-use implicit-by-ref params whose single use is as // an outgoing call argument. 
varDsc->setLvRefCnt(0, RCS_EARLY); varDsc->setLvRefCntWtd(0, RCS_EARLY); } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from // struct to pointer). Also choose (based on address-exposed analysis) // which struct promotions of implicit byrefs to keep or discard. // For those which are kept, insert the appropriate initialization code. // For those which are to be discarded, annotate the promoted field locals // so that fgMorphImplicitByRefArgs will know to rewrite their appearances // using indirections off the pointer parameters. void Compiler::fgRetypeImplicitByRefArgs() { #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) #ifdef DEBUG if (verbose) { printf("\n*************** In fgRetypeImplicitByRefArgs()\n"); } #endif // DEBUG for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { unsigned size; if (varDsc->lvSize() > REGSIZE_BYTES) { size = varDsc->lvSize(); } else { CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd(); size = info.compCompHnd->getClassSize(typeHnd); } if (varDsc->lvPromoted) { // This implicit-by-ref was promoted; create a new temp to represent the // promoted struct before rewriting this parameter as a pointer. unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref")); lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(newLclNum); } // Update varDsc since lvaGrabTemp might have re-allocated the var dsc array. varDsc = lvaGetDesc(lclNum); // Copy the struct promotion annotations to the new temp. LclVarDsc* newVarDsc = lvaGetDesc(newLclNum); newVarDsc->lvPromoted = true; newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart; newVarDsc->lvFieldCnt = varDsc->lvFieldCnt; newVarDsc->lvContainsHoles = varDsc->lvContainsHoles; newVarDsc->lvCustomLayout = varDsc->lvCustomLayout; #ifdef DEBUG newVarDsc->lvKeepType = true; #endif // DEBUG // Propagate address-taken-ness and do-not-enregister-ness. newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason())); newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister; newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr; newVarDsc->lvSingleDef = varDsc->lvSingleDef; newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate; newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef; #ifdef DEBUG newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason()); #endif // DEBUG // If the promotion is dependent, the promoted temp would just be committed // to memory anyway, so we'll rewrite its appearances to be indirections // through the pointer parameter, the same as we'd do for this // parameter if it weren't promoted at all (otherwise the initialization // of the new temp would just be a needless memcpy at method entry). // // Otherwise, see how many appearances there are. We keep two early ref counts: total // number of references to the struct or some field, and how many of these are // arguments to calls. We undo promotion unless we see enough non-call uses. 
// const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY); const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY); assert(totalAppearances >= callAppearances); const unsigned nonCallAppearances = totalAppearances - callAppearances; bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) || (nonCallAppearances <= varDsc->lvFieldCnt)); #ifdef DEBUG // Above is a profitability heurisic; either value of // undoPromotion should lead to correct code. So, // under stress, make different decisions at times. if (compStressCompile(STRESS_BYREF_PROMOTION, 25)) { undoPromotion = !undoPromotion; JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum, undoPromotion ? "" : "NOT"); } #endif // DEBUG JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n", undoPromotion ? "Undoing" : "Keeping", lclNum, (lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "", totalAppearances, nonCallAppearances, varDsc->lvFieldCnt); if (!undoPromotion) { // Insert IR that initializes the temp from the parameter. // LHS is a simple reference to the temp. fgEnsureFirstBBisScratch(); GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType); // RHS is an indirection (using GT_OBJ) off the parameter. GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF); GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size)); GenTree* assign = gtNewAssignNode(lhs, rhs); fgNewStmtAtBeg(fgFirstBB, assign); } // Update the locals corresponding to the promoted fields. unsigned fieldLclStart = varDsc->lvFieldLclStart; unsigned fieldCount = varDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); if (undoPromotion) { // Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs // will know to rewrite appearances of this local. assert(fieldVarDsc->lvParentLcl == lclNum); } else { // Set the new parent. fieldVarDsc->lvParentLcl = newLclNum; } fieldVarDsc->lvIsParam = false; // The fields shouldn't inherit any register preferences from // the parameter which is really a pointer to the struct. fieldVarDsc->lvIsRegArg = false; fieldVarDsc->lvIsMultiRegArg = false; fieldVarDsc->SetArgReg(REG_NA); #if FEATURE_MULTIREG_ARGS fieldVarDsc->SetOtherArgReg(REG_NA); #endif } // Hijack lvFieldLclStart to record the new temp number. // It will get fixed up in fgMarkDemotedImplicitByRefArgs. varDsc->lvFieldLclStart = newLclNum; // Go ahead and clear lvFieldCnt -- either we're promoting // a replacement temp or we're not promoting this arg, and // in either case the parameter is now a pointer that doesn't // have these fields. varDsc->lvFieldCnt = 0; // Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs // whether references to the struct should be rewritten as // indirections off the pointer (not promoted) or references // to the new struct local (promoted). varDsc->lvPromoted = !undoPromotion; } else { // The "undo promotion" path above clears lvPromoted for args that struct // promotion wanted to promote but that aren't considered profitable to // rewrite. It hijacks lvFieldLclStart to communicate to // fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left // on such args for fgMorphImplicitByRefArgs to consult in the interim. 
// Here we have an arg that was simply never promoted, so make sure it doesn't // have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs // and fgMarkDemotedImplicitByRefArgs. assert(varDsc->lvFieldLclStart == 0); } // Since the parameter in this position is really a pointer, its type is TYP_BYREF. varDsc->lvType = TYP_BYREF; // Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF // make sure that the following flag is not set as these will force SSA to // exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa) // varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it. // The struct parameter may have had its address taken, but the pointer parameter // cannot -- any uses of the struct parameter's address are uses of the pointer // parameter's value, and there's no way for the MSIL to reference the pointer // parameter's address. So clear the address-taken bit for the parameter. varDsc->CleanAddressExposed(); varDsc->lvDoNotEnregister = 0; #ifdef DEBUG // This should not be converted to a double in stress mode, // because it is really a pointer varDsc->lvKeepType = 1; if (verbose) { printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum); } #endif // DEBUG } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } //------------------------------------------------------------------------ // fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion // asked to promote. Appearances of these have now been rewritten // (by fgMorphImplicitByRefArgs) using indirections from the pointer // parameter or references to the promotion temp, as appropriate. void Compiler::fgMarkDemotedImplicitByRefArgs() { JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n"); #if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++) { LclVarDsc* varDsc = lvaGetDesc(lclNum); if (lvaIsImplicitByRefLocal(lclNum)) { JITDUMP("Clearing annotation for V%02d\n", lclNum); if (varDsc->lvPromoted) { // The parameter is simply a pointer now, so clear lvPromoted. It was left set // by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that // appearances of this arg needed to be rewritten to a new promoted struct local. varDsc->lvPromoted = false; // Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs // to tell fgMorphImplicitByRefArgs which local is the new promoted struct one. varDsc->lvFieldLclStart = 0; } else if (varDsc->lvFieldLclStart != 0) { // We created new temps to represent a promoted struct corresponding to this // parameter, but decided not to go through with the promotion and have // rewritten all uses as indirections off the pointer parameter. // We stashed the pointer to the new struct temp in lvFieldLclStart; make // note of that and clear the annotation. unsigned structLclNum = varDsc->lvFieldLclStart; varDsc->lvFieldLclStart = 0; // The temp struct is now unused; set flags appropriately so that we // won't allocate space for it on the stack. 
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum); structVarDsc->CleanAddressExposed(); #ifdef DEBUG structVarDsc->lvUnusedStruct = true; structVarDsc->lvUndoneStructPromotion = true; #endif // DEBUG unsigned fieldLclStart = structVarDsc->lvFieldLclStart; unsigned fieldCount = structVarDsc->lvFieldCnt; unsigned fieldLclStop = fieldLclStart + fieldCount; for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum) { JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum); // Fix the pointer to the parent local. LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum); assert(fieldVarDsc->lvParentLcl == lclNum); fieldVarDsc->lvParentLcl = structLclNum; // The field local is now unused; set flags appropriately so that // we won't allocate stack space for it. fieldVarDsc->CleanAddressExposed(); } } } } #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } /***************************************************************************** * * Morph irregular parameters * for x64 and ARM64 this means turning them into byrefs, adding extra indirs. */ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { #if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) return false; #else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 bool changed = false; // Implicit byref morphing needs to know if the reference to the parameter is a // child of GT_ADDR or not, so this method looks one level down and does the // rewrite whenever a child is a reference to an implicit byref parameter. if (tree->gtOper == GT_ADDR) { if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true); changed = (morphedTree != nullptr); assert(!changed || (morphedTree == tree)); } } else { for (GenTree** pTree : tree->UseEdges()) { GenTree** pTreeCopy = pTree; GenTree* childTree = *pTree; if (childTree->gtOper == GT_LCL_VAR) { GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false); if (newChildTree != nullptr) { changed = true; *pTreeCopy = newChildTree; } } } } return changed; #endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 } GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr) { assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR))); assert(isAddr == (tree->gtOper == GT_ADDR)); GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree; unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum(); LclVarDsc* lclVarDsc = lvaGetDesc(lclNum); CORINFO_FIELD_HANDLE fieldHnd; unsigned fieldOffset = 0; var_types fieldRefType = TYP_UNKNOWN; if (lvaIsImplicitByRefLocal(lclNum)) { // The SIMD transformation to coalesce contiguous references to SIMD vector fields will // re-invoke the traversal to mark address-taken locals. // So, we may encounter a tree that has already been transformed to TYP_BYREF. // If we do, leave it as-is. if (!varTypeIsStruct(lclVarTree)) { assert(lclVarTree->TypeGet() == TYP_BYREF); return nullptr; } else if (lclVarDsc->lvPromoted) { // fgRetypeImplicitByRefArgs created a new promoted struct local to represent this // arg. Rewrite this to refer to the new local. 
assert(lclVarDsc->lvFieldLclStart != 0); lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart); return tree; } fieldHnd = nullptr; } else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl)) { // This was a field reference to an implicit-by-reference struct parameter that was // dependently promoted; update it to a field reference off the pointer. // Grab the field handle from the struct field lclVar. fieldHnd = lclVarDsc->lvFieldHnd; fieldOffset = lclVarDsc->lvFldOffset; assert(fieldHnd != nullptr); // Update lclNum/lclVarDsc to refer to the parameter lclNum = lclVarDsc->lvParentLcl; lclVarDsc = lvaGetDesc(lclNum); fieldRefType = lclVarTree->TypeGet(); } else { // We only need to tranform the 'marked' implicit by ref parameters return nullptr; } // This is no longer a def of the lclVar, even if it WAS a def of the struct. lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK); if (isAddr) { if (fieldHnd == nullptr) { // change &X into just plain X tree->ReplaceWith(lclVarTree, this); tree->gtType = TYP_BYREF; } else { // change &(X.f) [i.e. GT_ADDR of local for promoted arg field] // into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param] lclVarTree->AsLclVarCommon()->SetLclNum(lclNum); lclVarTree->gtType = TYP_BYREF; tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset); } #ifdef DEBUG if (verbose) { printf("Replacing address of implicit by ref struct parameter with byref:\n"); } #endif // DEBUG } else { // Change X into OBJ(X) or FIELD(X, f) var_types structType = tree->gtType; tree->gtType = TYP_BYREF; if (fieldHnd) { tree->AsLclVarCommon()->SetLclNum(lclNum); tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset); } else { tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree); if (structType == TYP_STRUCT) { gtSetObjGcInfo(tree->AsObj()); } } // TODO-CQ: If the VM ever stops violating the ABI and passing heap references // we could remove TGTANYWHERE tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE); #ifdef DEBUG if (verbose) { printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n"); } #endif // DEBUG } #ifdef DEBUG if (verbose) { gtDispTree(tree); } #endif // DEBUG return tree; } //------------------------------------------------------------------------ // fgAddFieldSeqForZeroOffset: // Associate a fieldSeq (with a zero offset) with the GenTree node 'addr' // // Arguments: // addr - A GenTree node // fieldSeqZero - a fieldSeq (with a zero offset) // // Notes: // Some GenTree nodes have internal fields that record the field sequence. // If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD // we can append the field sequence using the gtFieldSeq // If we have a GT_ADD of a GT_CNS_INT we can use the // fieldSeq from child node. // Otherwise we record 'fieldSeqZero' in the GenTree node using // a Map: GetFieldSeqStore() // When doing so we take care to preserve any existing zero field sequence // void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero) { // We expect 'addr' to be an address at this point. assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); // Tunnel through any commas. const bool commaOnly = true; addr = addr->gtEffectiveVal(commaOnly); // We still expect 'addr' to be an address at this point. 
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF); FieldSeqNode* fieldSeqUpdate = fieldSeqZero; GenTree* fieldSeqNode = addr; bool fieldSeqRecorded = false; #ifdef DEBUG if (verbose) { printf("\nfgAddFieldSeqForZeroOffset for"); gtDispAnyFieldSeq(fieldSeqZero); printf("\naddr (Before)\n"); gtDispNode(addr, nullptr, nullptr, false); gtDispCommonEndLine(addr); } #endif // DEBUG switch (addr->OperGet()) { case GT_CNS_INT: fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; break; case GT_ADDR: if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD) { fieldSeqNode = addr->AsOp()->gtOp1; GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld(); fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero); lclFld->SetFieldSeq(fieldSeqUpdate); fieldSeqRecorded = true; } break; case GT_ADD: if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp1; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT) { fieldSeqNode = addr->AsOp()->gtOp2; fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero); addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate; fieldSeqRecorded = true; } break; default: break; } if (fieldSeqRecorded == false) { // Record in the general zero-offset map. // The "addr" node might already be annotated with a zero-offset field sequence. FieldSeqNode* existingFieldSeq = nullptr; if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq)) { // Append the zero field sequences fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero); } // Overwrite the field sequence annotation for op1 GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite); fieldSeqRecorded = true; } #ifdef DEBUG if (verbose) { printf(" (After)\n"); gtDispNode(fieldSeqNode, nullptr, nullptr, false); gtDispCommonEndLine(fieldSeqNode); } #endif // DEBUG } #ifdef FEATURE_SIMD //----------------------------------------------------------------------------------- // fgMorphCombineSIMDFieldAssignments: // If the RHS of the input stmt is a read for simd vector X Field, then this function // will keep reading next few stmts based on the vector size(2, 3, 4). // If the next stmts LHS are located contiguous and RHS are also located // contiguous, then we replace those statements with a copyblk. // // Argument: // block - BasicBlock*. block which stmt belongs to // stmt - Statement*. the stmt node we want to check // // return value: // if this funciton successfully optimized the stmts, then return true. 
Otherwise // return false; bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt) { GenTree* tree = stmt->GetRootNode(); assert(tree->OperGet() == GT_ASG); GenTree* originalLHS = tree->AsOp()->gtOp1; GenTree* prevLHS = tree->AsOp()->gtOp1; GenTree* prevRHS = tree->AsOp()->gtOp2; unsigned index = 0; CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF; unsigned simdSize = 0; GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true); if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT) { // if the RHS is not from a SIMD vector field X, then there is no need to check further. return false; } var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType); var_types simdType = getSIMDTypeForSize(simdSize); int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1; int remainingAssignments = assignmentsCount; Statement* curStmt = stmt->GetNextStmt(); Statement* lastStmt = stmt; while (curStmt != nullptr && remainingAssignments > 0) { GenTree* exp = curStmt->GetRootNode(); if (exp->OperGet() != GT_ASG) { break; } GenTree* curLHS = exp->gtGetOp1(); GenTree* curRHS = exp->gtGetOp2(); if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS)) { break; } remainingAssignments--; prevLHS = curLHS; prevRHS = curRHS; lastStmt = curStmt; curStmt = curStmt->GetNextStmt(); } if (remainingAssignments > 0) { // if the left assignments number is bigger than zero, then this means // that the assignments are not assgining to the contiguously memory // locations from same vector. return false; } #ifdef DEBUG if (verbose) { printf("\nFound contiguous assignments from a SIMD vector to memory.\n"); printf("From " FMT_BB ", stmt ", block->bbNum); printStmtID(stmt); printf(" to stmt"); printStmtID(lastStmt); printf("\n"); } #endif for (int i = 0; i < assignmentsCount; i++) { fgRemoveStmt(block, stmt->GetNextStmt()); } GenTree* dstNode; if (originalLHS->OperIs(GT_LCL_FLD)) { dstNode = originalLHS; dstNode->gtType = simdType; dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField()); // This may have changed a partial local field into full local field if (dstNode->IsPartialLclFld(this)) { dstNode->gtFlags |= GTF_VAR_USEASG; } else { dstNode->gtFlags &= ~GTF_VAR_USEASG; } } else { GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize); if (simdStructNode->OperIsLocal()) { setLclRelatedToSIMDIntrinsic(simdStructNode); } GenTree* copyBlkAddr = copyBlkDst; if (copyBlkAddr->gtOper == GT_LEA) { copyBlkAddr = copyBlkAddr->AsAddrMode()->Base(); } GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr(); if (localDst != nullptr) { setLclRelatedToSIMDIntrinsic(localDst); } if (simdStructNode->TypeGet() == TYP_BYREF) { assert(simdStructNode->OperIsLocal()); assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum())); simdStructNode = gtNewIndir(simdType, simdStructNode); } else { assert(varTypeIsSIMD(simdStructNode)); } dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst); } #ifdef DEBUG if (verbose) { printf("\n" FMT_BB " stmt ", block->bbNum); printStmtID(stmt); printf("(before)\n"); gtDispStmt(stmt); } #endif assert(!simdStructNode->CanCSE()); simdStructNode->ClearDoNotCSE(); tree = gtNewAssignNode(dstNode, simdStructNode); stmt->SetRootNode(tree); // Since we generated a new address node which didn't exist before, // we should expose this address manually here. 
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all // local field access into LCL_FLDs, at that point we would be // combining 2 existing LCL_FLDs or 2 FIELDs that do not reference // a local and thus cannot result in a new address exposed local. fgMarkAddressExposedLocals(stmt); #ifdef DEBUG if (verbose) { printf("\nReplaced " FMT_BB " stmt", block->bbNum); printStmtID(stmt); printf("(after)\n"); gtDispStmt(stmt); } #endif return true; } #endif // FEATURE_SIMD //------------------------------------------------------------------------ // fgCheckStmtAfterTailCall: check that statements after the tail call stmt // candidate are in one of expected forms, that are desctibed below. // // Return Value: // 'true' if stmts are in the expected form, else 'false'. // bool Compiler::fgCheckStmtAfterTailCall() { // For void calls, we would have created a GT_CALL in the stmt list. // For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)). // For calls returning structs, we would have a void call, followed by a void return. // For debuggable code, it would be an assignment of the call to a temp // We want to get rid of any of this extra trees, and just leave // the call. Statement* callStmt = fgMorphStmt; Statement* nextMorphStmt = callStmt->GetNextStmt(); // Check that the rest stmts in the block are in one of the following pattern: // 1) ret(void) // 2) ret(cast*(callResultLclVar)) // 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block // 4) nop if (nextMorphStmt != nullptr) { GenTree* callExpr = callStmt->GetRootNode(); if (callExpr->gtOper != GT_ASG) { // The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar), // where lclVar was return buffer in the call for structs or simd. Statement* retStmt = nextMorphStmt; GenTree* retExpr = retStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); nextMorphStmt = retStmt->GetNextStmt(); } else { noway_assert(callExpr->gtGetOp1()->OperIsLocal()); unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum(); #if FEATURE_TAILCALL_OPT_SHARED_RETURN // We can have a chain of assignments from the call result to // various inline return spill temps. These are ok as long // as the last one ultimately provides the return value or is ignored. // // And if we're returning a small type we may see a cast // on the source side. while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP))) { if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP)) { nextMorphStmt = nextMorphStmt->GetNextStmt(); continue; } Statement* moveStmt = nextMorphStmt; GenTree* moveExpr = nextMorphStmt->GetRootNode(); GenTree* moveDest = moveExpr->gtGetOp1(); noway_assert(moveDest->OperIsLocal()); // Tunnel through any casts on the source side. GenTree* moveSource = moveExpr->gtGetOp2(); while (moveSource->OperIs(GT_CAST)) { noway_assert(!moveSource->gtOverflow()); moveSource = moveSource->gtGetOp1(); } noway_assert(moveSource->OperIsLocal()); // Verify we're just passing the value from one local to another // along the chain. 
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum(); noway_assert(srcLclNum == callResultLclNumber); const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum(); callResultLclNumber = dstLclNum; nextMorphStmt = moveStmt->GetNextStmt(); } if (nextMorphStmt != nullptr) #endif { Statement* retStmt = nextMorphStmt; GenTree* retExpr = nextMorphStmt->GetRootNode(); noway_assert(retExpr->gtOper == GT_RETURN); GenTree* treeWithLcl = retExpr->gtGetOp1(); while (treeWithLcl->gtOper == GT_CAST) { noway_assert(!treeWithLcl->gtOverflow()); treeWithLcl = treeWithLcl->gtGetOp1(); } noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum()); nextMorphStmt = retStmt->GetNextStmt(); } } } return nextMorphStmt == nullptr; } //------------------------------------------------------------------------ // fgCanTailCallViaJitHelper: check whether we can use the faster tailcall // JIT helper on x86. // // Return Value: // 'true' if we can; or 'false' if we should use the generic tailcall mechanism. // bool Compiler::fgCanTailCallViaJitHelper() { #if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN) // On anything except windows X86 we have no faster mechanism available. return false; #else // The JIT helper does not properly handle the case where localloc was used. if (compLocallocUsed) return false; return true; #endif } //------------------------------------------------------------------------ // fgMorphReduceAddOps: reduce successive variable adds into a single multiply, // e.g., i + i + i + i => i * 4. // // Arguments: // tree - tree for reduction // // Return Value: // reduced tree if pattern matches, original tree otherwise // GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree) { // ADD(_, V0) starts the pattern match. if (!tree->OperIs(GT_ADD) || tree->gtOverflow()) { return tree; } #ifndef TARGET_64BIT // Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing // ADD ops with a helper function call. Don't apply optimization in that case. if (tree->TypeGet() == TYP_LONG) { return tree; } #endif GenTree* lclVarTree = tree->AsOp()->gtOp2; GenTree* consTree = tree->AsOp()->gtOp1; GenTree* op1 = consTree; GenTree* op2 = lclVarTree; if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2)) { return tree; } int foldCount = 0; unsigned lclNum = op2->AsLclVarCommon()->GetLclNum(); // Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum). while (true) { // ADD(lclNum, lclNum), end of tree if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount += 2; break; } // ADD(ADD(X, Y), lclNum), keep descending else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) && op2->AsLclVarCommon()->GetLclNum() == lclNum) { foldCount++; op2 = op1->AsOp()->gtOp2; op1 = op1->AsOp()->gtOp1; } // Any other case is a pattern we won't attempt to fold for now. else { return tree; } } // V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize // accordingly consTree->BashToConst(foldCount, tree->TypeGet()); GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree); DEBUG_DESTROY_NODE(tree); return morphed; }
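A note on fgMorphReduceAddOps above: the rewrite rests on the arithmetic identity that a left-leaning chain of adds of one local equals that local times the fold count. The following is a minimal standalone C++ check of that identity — illustrative only, not the JIT's GenTree code; the names addChain and foldedForm are made up for this sketch.

#include <cassert>

// The shape the JIT's pattern match walks:
// ADD(ADD(ADD(i, i), i), i)  ==>  MUL(i, 4)
int addChain(int i)
{
    return ((i + i) + i) + i; // foldCount reaches 4 for this chain
}

int foldedForm(int i)
{
    return i * 4;
}

int main()
{
    // Range kept small so neither form overflows.
    for (int i = -1000; i <= 1000; ++i)
    {
        assert(addChain(i) == foldedForm(i));
    }
    return 0;
}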
1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/jit/register_arg_convention.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __register_arg_convention__ #define __register_arg_convention__ class LclVarDsc; struct InitVarDscInfo { LclVarDsc* varDsc; unsigned varNum; unsigned intRegArgNum; unsigned floatRegArgNum; unsigned maxIntRegArgNum; unsigned maxFloatRegArgNum; bool hasRetBufArg; #ifdef TARGET_ARM // Support back-filling of FP parameters. This is similar to code in gtMorphArgs() that // handles arguments. regMaskTP fltArgSkippedRegMask; bool anyFloatStackArgs; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL // It is used to calculate argument stack size information in byte unsigned stackArgSize; #endif // FEATURE_FASTTAILCALL public: // set to initial values void Init(LclVarDsc* lvaTable, bool _hasRetBufArg, unsigned _maxIntRegArgNum, unsigned _maxFloatRegArgNum) { hasRetBufArg = _hasRetBufArg; varDsc = &lvaTable[0]; // the first argument LclVar 0 varNum = 0; // the first argument varNum 0 intRegArgNum = 0; floatRegArgNum = 0; maxIntRegArgNum = _maxIntRegArgNum; maxFloatRegArgNum = _maxFloatRegArgNum; #ifdef TARGET_ARM fltArgSkippedRegMask = RBM_NONE; anyFloatStackArgs = false; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL stackArgSize = 0; #endif // FEATURE_FASTTAILCALL } // return ref to current register arg for this type unsigned& regArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? floatRegArgNum : intRegArgNum; } // Allocate a set of contiguous argument registers. "type" is either an integer // type, indicating to use the integer registers, or a floating-point type, indicating // to use the floating-point registers. The actual type (TYP_FLOAT vs. TYP_DOUBLE) is // ignored. "numRegs" is the number of registers to allocate. Thus, on ARM, to allocate // a double-precision floating-point register, you need to pass numRegs=2. For an HFA, // pass the number of slots/registers needed. // This routine handles floating-point register back-filling on ARM. // Returns the first argument register of the allocated set. unsigned allocRegArg(var_types type, unsigned numRegs = 1); #ifdef TARGET_ARM // We are aligning the register to an ABI-required boundary, such as putting // double-precision floats in even-numbered registers, by skipping one register. // "requiredRegAlignment" is the amount to align to: 1 for no alignment (everything // is 1-aligned), 2 for "double" alignment. // Returns the number of registers skipped. unsigned alignReg(var_types type, unsigned requiredRegAlignment); #endif // TARGET_ARM // Return true if it is an enregisterable type and there is room. // Note that for "type", we only care if it is float or not. In particular, // "numRegs" must be "2" to allocate an ARM double-precision floating-point register. bool canEnreg(var_types type, unsigned numRegs = 1); // Set the fact that we have used up all remaining registers of 'type' // void setAllRegArgUsed(var_types type) { regArgNum(type) = maxRegArgNum(type); } #ifdef TARGET_ARM void setAnyFloatStackArgs() { anyFloatStackArgs = true; } bool existAnyFloatStackArgs() { return anyFloatStackArgs; } #endif // TARGET_ARM private: // return max register arg for this type unsigned maxRegArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? 
maxFloatRegArgNum : maxIntRegArgNum; } bool enoughAvailRegs(var_types type, unsigned numRegs = 1); void nextReg(var_types type, unsigned numRegs = 1) { regArgNum(type) = min(regArgNum(type) + numRegs, maxRegArgNum(type)); } }; #endif // __register_arg_convention__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __register_arg_convention__ #define __register_arg_convention__ class LclVarDsc; struct InitVarDscInfo { LclVarDsc* varDsc; unsigned varNum; unsigned intRegArgNum; unsigned floatRegArgNum; unsigned maxIntRegArgNum; unsigned maxFloatRegArgNum; bool hasRetBufArg; #ifdef TARGET_ARM // Support back-filling of FP parameters. This is similar to code in gtMorphArgs() that // handles arguments. regMaskTP fltArgSkippedRegMask; bool anyFloatStackArgs; bool hasSplitParam; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL // It is used to calculate argument stack size information in byte unsigned stackArgSize; #endif // FEATURE_FASTTAILCALL public: // set to initial values void Init(LclVarDsc* lvaTable, bool _hasRetBufArg, unsigned _maxIntRegArgNum, unsigned _maxFloatRegArgNum) { hasRetBufArg = _hasRetBufArg; varDsc = &lvaTable[0]; // the first argument LclVar 0 varNum = 0; // the first argument varNum 0 intRegArgNum = 0; floatRegArgNum = 0; maxIntRegArgNum = _maxIntRegArgNum; maxFloatRegArgNum = _maxFloatRegArgNum; #ifdef TARGET_ARM fltArgSkippedRegMask = RBM_NONE; anyFloatStackArgs = false; hasSplitParam = false; #endif // TARGET_ARM #if FEATURE_FASTTAILCALL stackArgSize = 0; #endif // FEATURE_FASTTAILCALL } // return ref to current register arg for this type unsigned& regArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? floatRegArgNum : intRegArgNum; } // Allocate a set of contiguous argument registers. "type" is either an integer // type, indicating to use the integer registers, or a floating-point type, indicating // to use the floating-point registers. The actual type (TYP_FLOAT vs. TYP_DOUBLE) is // ignored. "numRegs" is the number of registers to allocate. Thus, on ARM, to allocate // a double-precision floating-point register, you need to pass numRegs=2. For an HFA, // pass the number of slots/registers needed. // This routine handles floating-point register back-filling on ARM. // Returns the first argument register of the allocated set. unsigned allocRegArg(var_types type, unsigned numRegs = 1); #ifdef TARGET_ARM // We are aligning the register to an ABI-required boundary, such as putting // double-precision floats in even-numbered registers, by skipping one register. // "requiredRegAlignment" is the amount to align to: 1 for no alignment (everything // is 1-aligned), 2 for "double" alignment. // Returns the number of registers skipped. unsigned alignReg(var_types type, unsigned requiredRegAlignment); #endif // TARGET_ARM // Return true if it is an enregisterable type and there is room. // Note that for "type", we only care if it is float or not. In particular, // "numRegs" must be "2" to allocate an ARM double-precision floating-point register. bool canEnreg(var_types type, unsigned numRegs = 1); // Set the fact that we have used up all remaining registers of 'type' // void setAllRegArgUsed(var_types type) { regArgNum(type) = maxRegArgNum(type); } #ifdef TARGET_ARM void setAnyFloatStackArgs() { anyFloatStackArgs = true; } bool existAnyFloatStackArgs() { return anyFloatStackArgs; } #endif // TARGET_ARM private: // return max register arg for this type unsigned maxRegArgNum(var_types type) { return varTypeUsesFloatArgReg(type) ? 
maxFloatRegArgNum : maxIntRegArgNum; } bool enoughAvailRegs(var_types type, unsigned numRegs = 1); void nextReg(var_types type, unsigned numRegs = 1) { regArgNum(type) = min(regArgNum(type) + numRegs, maxRegArgNum(type)); } }; #endif // __register_arg_convention__
1
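The record above adds the hasSplitParam flag to InitVarDscInfo, whose comments describe ARM's floating-point back-filling: a double takes an even-aligned pair of float registers, and a register skipped for that alignment can later be back-filled by a single float. Below is a minimal standalone C++ sketch of that allocation policy; it is an illustrative model only, not the JIT's allocRegArg implementation, and the BackfillAllocator name is made up.

    #include <cassert>
    #include <cstdint>

    // Simplified model of ARM32 VFP argument-register allocation (s0..s15).
    struct BackfillAllocator {
        uint32_t used = 0;                  // bitmask of allocated s-registers
        static const int kNumRegs = 16;     // mirrors MAX_FLOAT_REG_ARG

        // One float: take the lowest free register, which naturally back-fills
        // any hole left behind by double alignment.
        int allocFloat() {
            for (int i = 0; i < kNumRegs; i++)
                if ((used & (1u << i)) == 0) { used |= 1u << i; return i; }
            return -1;                      // no register free: goes to the stack
        }

        // One double: lowest free even/odd pair; a skipped odd register stays
        // free so a later float can back-fill it.
        int allocDouble() {
            for (int i = 0; i + 1 < kNumRegs; i += 2) {
                uint32_t pair = 3u << i;
                if ((used & pair) == 0) { used |= pair; return i; }
            }
            return -1;
        }
    };

    int main() {
        BackfillAllocator a;
        assert(a.allocFloat()  == 0);       // s0
        assert(a.allocDouble() == 2);       // s2/s3 (s1 skipped for alignment)
        assert(a.allocFloat()  == 1);       // s1 is back-filled
        return 0;
    }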
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/coreclr/jit/targetarm.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #if !defined(TARGET_ARM) #error The file should not be included for this platform. #endif // clang-format off // TODO-ARM-CQ: Use shift for division by power of 2 // TODO-ARM-CQ: Check for sdiv/udiv at runtime and generate it if available #define USE_HELPERS_FOR_INT_DIV 1 // BeagleBoard (ARMv7A) doesn't support SDIV/UDIV #define CPU_LOAD_STORE_ARCH 1 #define ROUND_FLOAT 0 // Do not round intermediate float expression results #define CPU_HAS_BYTE_REGS 0 #define CPBLK_UNROLL_LIMIT 32 // Upper bound to let the code generator loop unroll CpBlk. #define INITBLK_UNROLL_LIMIT 16 // Upper bound to let the code generator loop unroll InitBlk. #define FEATURE_FIXED_OUT_ARGS 1 // Preallocate the outgoing arg area in the prolog #define FEATURE_STRUCTPROMOTE 1 // JIT Optimization to promote fields of structs into registers #define FEATURE_MULTIREG_STRUCT_PROMOTE 0 // True when we want to promote fields of a multireg struct into registers #define FEATURE_FASTTAILCALL 0 // Tail calls made as epilog+jmp #define FEATURE_TAILCALL_OPT 0 // opportunistic Tail calls (i.e. without ".tail" prefix) made as fast tail calls. #define FEATURE_SET_FLAGS 1 // Set to true to force the JIT to mark the trees with GTF_SET_FLAGS when the flags need to be set #define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register (including HFA support) #define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register (including passing HFAs) #define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register (including HFA returns) #define FEATURE_STRUCT_CLASSIFIER 0 // Uses a classifier function to determine if structs are passed/returned in more than one register #define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (double). #define MAX_PASS_MULTIREG_BYTES 32 // Maximum size of a struct that could be passed in more than one register (Max is an HFA of 4 doubles) #define MAX_RET_MULTIREG_BYTES 32 // Maximum size of a struct that could be returned in more than one register (Max is an HFA of 4 doubles) #define MAX_ARG_REG_COUNT 4 // Maximum registers used to pass a single argument in multiple registers. (max is 4 floats or doubles using an HFA) #define MAX_RET_REG_COUNT 4 // Maximum registers used to return a value. #define MAX_MULTIREG_COUNT 4 // Maximum number of registers defined by a single instruction (including calls). // This is also the maximum number of registers for a MultiReg node. #define NOGC_WRITE_BARRIERS 0 // We DO-NOT have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 0 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses.
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants #define REG_FP_FIRST REG_F0 #define REG_FP_LAST REG_F31 #define FIRST_FP_ARGREG REG_F0 #define LAST_FP_ARGREG REG_F15 #define REGNUM_BITS 6 // number of bits in a REG_* #define REGSIZE_BYTES 4 // number of bytes in one register #define MIN_ARG_AREA_FOR_CALL 0 // Minimum required outgoing argument space for a call. #define CODE_ALIGN 2 // code alignment requirement #define STACK_ALIGN 8 // stack alignment requirement #define RBM_INT_CALLEE_SAVED (RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8|RBM_R9|RBM_R10) #define RBM_INT_CALLEE_TRASH (RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R12|RBM_LR) #define RBM_FLT_CALLEE_SAVED (RBM_F16|RBM_F17|RBM_F18|RBM_F19|RBM_F20|RBM_F21|RBM_F22|RBM_F23|RBM_F24|RBM_F25|RBM_F26|RBM_F27|RBM_F28|RBM_F29|RBM_F30|RBM_F31) #define RBM_FLT_CALLEE_TRASH (RBM_F0|RBM_F1|RBM_F2|RBM_F3|RBM_F4|RBM_F5|RBM_F6|RBM_F7|RBM_F8|RBM_F9|RBM_F10|RBM_F11|RBM_F12|RBM_F13|RBM_F14|RBM_F15) #define RBM_CALLEE_SAVED (RBM_INT_CALLEE_SAVED | RBM_FLT_CALLEE_SAVED) #define RBM_CALLEE_TRASH (RBM_INT_CALLEE_TRASH | RBM_FLT_CALLEE_TRASH) #define REG_DEFAULT_HELPER_CALL_TARGET REG_R12 #define RBM_DEFAULT_HELPER_CALL_TARGET RBM_R12 #define RBM_ALLINT (RBM_INT_CALLEE_SAVED | RBM_INT_CALLEE_TRASH) #define RBM_ALLFLOAT (RBM_FLT_CALLEE_SAVED | RBM_FLT_CALLEE_TRASH) #define RBM_ALLDOUBLE (RBM_F0|RBM_F2|RBM_F4|RBM_F6|RBM_F8|RBM_F10|RBM_F12|RBM_F14|RBM_F16|RBM_F18|RBM_F20|RBM_F22|RBM_F24|RBM_F26|RBM_F28|RBM_F30) #define REG_VAR_ORDER REG_R3,REG_R2,REG_R1,REG_R0,REG_R4,REG_LR,REG_R12,\ REG_R5,REG_R6,REG_R7,REG_R8,REG_R9,REG_R10 #define REG_VAR_ORDER_FLT REG_F8, REG_F9, REG_F10, REG_F11, \ REG_F12, REG_F13, REG_F14, REG_F15, \ REG_F6, REG_F7, REG_F4, REG_F5, \ REG_F2, REG_F3, REG_F0, REG_F1, \ REG_F16, REG_F17, REG_F18, REG_F19, \ REG_F20, REG_F21, REG_F22, REG_F23, \ REG_F24, REG_F25, REG_F26, REG_F27, \ REG_F28, REG_F29, REG_F30, REG_F31, #define RBM_LOW_REGS (RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R4|RBM_R5|RBM_R6|RBM_R7) #define RBM_HIGH_REGS (RBM_R8|RBM_R9|RBM_R10|RBM_R11|RBM_R12|RBM_SP|RBM_LR|RBM_PC) #define REG_CALLEE_SAVED_ORDER REG_R4,REG_R5,REG_R6,REG_R7,REG_R8,REG_R9,REG_R10,REG_R11 #define RBM_CALLEE_SAVED_ORDER RBM_R4,RBM_R5,RBM_R6,RBM_R7,RBM_R8,RBM_R9,RBM_R10,RBM_R11 #define CNT_CALLEE_SAVED (8) #define CNT_CALLEE_TRASH (6) #define CNT_CALLEE_ENREG (CNT_CALLEE_SAVED-1) #define CNT_CALLEE_SAVED_FLOAT (16) #define CNT_CALLEE_TRASH_FLOAT (16) #define CALLEE_SAVED_REG_MAXSZ (CNT_CALLEE_SAVED*REGSIZE_BYTES) #define CALLEE_SAVED_FLOAT_MAXSZ (CNT_CALLEE_SAVED_FLOAT*sizeof(float)) // Temporary registers used for the GS cookie check. #define REG_GSCOOKIE_TMP_0 REG_R12 #define REG_GSCOOKIE_TMP_1 REG_LR // register to hold shift amount; no special register is required on the ARM #define REG_SHIFT REG_NA #define RBM_SHIFT RBM_ALLINT // register to hold shift amount when shifting 64-bit values (this uses a helper call) #define REG_SHIFT_LNG REG_R2 // REG_ARG_2 #define RBM_SHIFT_LNG RBM_R2 // RBM_ARG_2 // This is a general scratch register that does not conflict with the argument registers #define REG_SCRATCH REG_LR // This is a general register that can be optionally reserved for other purposes during codegen #define REG_OPT_RSVD REG_R10 #define RBM_OPT_RSVD RBM_R10 // We reserve R9 to store SP on entry for stack unwinding when localloc is used // This needs to stay in sync with the ARM version of InlinedCallFrame::UpdateRegDisplay code. 
#define REG_SAVED_LOCALLOC_SP REG_R9 #define RBM_SAVED_LOCALLOC_SP RBM_R9 // Where is the exception object on entry to the handler block? #define REG_EXCEPTION_OBJECT REG_R0 #define RBM_EXCEPTION_OBJECT RBM_R0 #define REG_JUMP_THUNK_PARAM REG_R12 #define RBM_JUMP_THUNK_PARAM RBM_R12 // ARM write barrier ABI (see vm\arm\asmhelpers.asm, vm\arm\asmhelpers.S): // CORINFO_HELP_ASSIGN_REF (JIT_WriteBarrier), CORINFO_HELP_CHECKED_ASSIGN_REF (JIT_CheckedWriteBarrier): // On entry: // r0: the destination address (LHS of the assignment) // r1: the object reference (RHS of the assignment) // On exit: // r0: trashed // r3: trashed // CORINFO_HELP_ASSIGN_BYREF (JIT_ByRefWriteBarrier): // On entry: // r0: the destination address (object reference written here) // r1: the source address (points to object reference to write) // On exit: // r0: incremented by 4 // r1: incremented by 4 // r2: trashed // r3: trashed #define REG_WRITE_BARRIER_DST_BYREF REG_ARG_0 #define RBM_WRITE_BARRIER_DST_BYREF RBM_ARG_0 #define REG_WRITE_BARRIER_SRC_BYREF REG_ARG_1 #define RBM_WRITE_BARRIER_SRC_BYREF RBM_ARG_1 #define RBM_CALLEE_TRASH_NOGC (RBM_R2|RBM_R3|RBM_LR|RBM_DEFAULT_HELPER_CALL_TARGET) // Registers killed by CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF. #define RBM_CALLEE_TRASH_WRITEBARRIER (RBM_R0|RBM_R3|RBM_LR|RBM_DEFAULT_HELPER_CALL_TARGET) // Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF. #define RBM_CALLEE_GCTRASH_WRITEBARRIER RBM_CALLEE_TRASH_WRITEBARRIER // Registers killed by CORINFO_HELP_ASSIGN_BYREF. #define RBM_CALLEE_TRASH_WRITEBARRIER_BYREF (RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF | RBM_CALLEE_TRASH_NOGC) // Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_BYREF. // Note that r0 and r1 are still valid byref pointers after this helper call, despite their value being changed. #define RBM_CALLEE_GCTRASH_WRITEBARRIER_BYREF RBM_CALLEE_TRASH_NOGC // GenericPInvokeCalliHelper VASigCookie Parameter #define REG_PINVOKE_COOKIE_PARAM REG_R4 #define RBM_PINVOKE_COOKIE_PARAM RBM_R4 // GenericPInvokeCalliHelper unmanaged target Parameter #define REG_PINVOKE_TARGET_PARAM REG_R12 #define RBM_PINVOKE_TARGET_PARAM RBM_R12 // IL stub's secret MethodDesc parameter (JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM) #define REG_SECRET_STUB_PARAM REG_R12 #define RBM_SECRET_STUB_PARAM RBM_R12 // R2R indirect call. Use the same registers as VSD #define REG_R2R_INDIRECT_PARAM REG_R4 #define RBM_R2R_INDIRECT_PARAM RBM_R4 // JMP Indirect call register #define REG_INDIRECT_CALL_TARGET_REG REG_R12 // Registers used by PInvoke frame setup #define REG_PINVOKE_FRAME REG_R4 #define RBM_PINVOKE_FRAME RBM_R4 #define REG_PINVOKE_TCB REG_R5 #define RBM_PINVOKE_TCB RBM_R5 #define REG_PINVOKE_SCRATCH REG_R6 #define RBM_PINVOKE_SCRATCH RBM_R6 // The following defines are useful for iterating a regNumber #define REG_FIRST REG_R0 #define REG_INT_FIRST REG_R0 #define REG_INT_LAST REG_LR #define REG_INT_COUNT (REG_INT_LAST - REG_INT_FIRST + 1) #define REG_NEXT(reg) ((regNumber)((unsigned)(reg) + 1)) #define REG_PREV(reg) ((regNumber)((unsigned)(reg) - 1)) // The following registers are used in emitting Enter/Leave/Tailcall profiler callbacks #define REG_PROFILER_ENTER_ARG REG_R0 #define RBM_PROFILER_ENTER_ARG RBM_R0 #define REG_PROFILER_RET_SCRATCH REG_R2 #define RBM_PROFILER_RET_SCRATCH RBM_R2 // The registers trashed by profiler enter/leave/tailcall hook // See vm\arm\asmhelpers.asm for more details. 
#define RBM_PROFILER_ENTER_TRASH RBM_NONE // While REG_PROFILER_RET_SCRATCH is not trashed by the method, the register allocator must // consider it killed by the return. #define RBM_PROFILER_LEAVE_TRASH RBM_PROFILER_RET_SCRATCH #define RBM_PROFILER_TAILCALL_TRASH RBM_NONE // Which registers are int and long values returned in? #define REG_INTRET REG_R0 #define RBM_INTRET RBM_R0 #define RBM_LNGRET (RBM_R1|RBM_R0) #define REG_LNGRET_LO REG_R0 #define REG_LNGRET_HI REG_R1 #define RBM_LNGRET_LO RBM_R0 #define RBM_LNGRET_HI RBM_R1 #define REG_FLOATRET REG_F0 #define RBM_FLOATRET RBM_F0 #define RBM_DOUBLERET (RBM_F0|RBM_F1) // The registers trashed by the CORINFO_HELP_STOP_FOR_GC helper (JIT_RareDisableHelper). // See vm\arm\asmhelpers.asm for more details. #define RBM_STOP_FOR_GC_TRASH (RBM_CALLEE_TRASH & ~(RBM_LNGRET|RBM_R7|RBM_R8|RBM_R11|RBM_DOUBLERET|RBM_F2|RBM_F3|RBM_F4|RBM_F5|RBM_F6|RBM_F7)) // The registers trashed by the CORINFO_HELP_INIT_PINVOKE_FRAME helper. #define RBM_INIT_PINVOKE_FRAME_TRASH (RBM_CALLEE_TRASH | RBM_PINVOKE_TCB | RBM_PINVOKE_SCRATCH) #define RBM_VALIDATE_INDIRECT_CALL_TRASH (RBM_INT_CALLEE_TRASH) #define REG_VALIDATE_INDIRECT_CALL_ADDR REG_R0 #define REG_FPBASE REG_R11 #define RBM_FPBASE RBM_R11 #define STR_FPBASE "r11" #define REG_SPBASE REG_SP #define RBM_SPBASE RBM_SP #define STR_SPBASE "sp" #define FIRST_ARG_STACK_OFFS (2*REGSIZE_BYTES) // Caller's saved FP and return address #define MAX_REG_ARG 4 #define MAX_FLOAT_REG_ARG 16 #define MAX_HFA_RET_SLOTS 8 #define REG_ARG_FIRST REG_R0 #define REG_ARG_LAST REG_R3 #define REG_ARG_FP_FIRST REG_F0 #define REG_ARG_FP_LAST REG_F7 #define INIT_ARG_STACK_SLOT 0 // No outgoing reserved stack slots #define REG_ARG_0 REG_R0 #define REG_ARG_1 REG_R1 #define REG_ARG_2 REG_R2 #define REG_ARG_3 REG_R3 extern const regNumber intArgRegs [MAX_REG_ARG]; extern const regMaskTP intArgMasks[MAX_REG_ARG]; #define RBM_ARG_0 RBM_R0 #define RBM_ARG_1 RBM_R1 #define RBM_ARG_2 RBM_R2 #define RBM_ARG_3 RBM_R3 #define RBM_ARG_REGS (RBM_ARG_0|RBM_ARG_1|RBM_ARG_2|RBM_ARG_3) #define RBM_FLTARG_REGS (RBM_F0|RBM_F1|RBM_F2|RBM_F3|RBM_F4|RBM_F5|RBM_F6|RBM_F7|RBM_F8|RBM_F9|RBM_F10|RBM_F11|RBM_F12|RBM_F13|RBM_F14|RBM_F15) #define RBM_DBL_REGS RBM_ALLDOUBLE extern const regNumber fltArgRegs [MAX_FLOAT_REG_ARG]; extern const regMaskTP fltArgMasks[MAX_FLOAT_REG_ARG]; #define LBL_DIST_SMALL_MAX_NEG (0) #define LBL_DIST_SMALL_MAX_POS (+1020) #define LBL_DIST_MED_MAX_NEG (-4095) #define LBL_DIST_MED_MAX_POS (+4096) #define JMP_DIST_SMALL_MAX_NEG (-2048) #define JMP_DIST_SMALL_MAX_POS (+2046) #define CALL_DIST_MAX_NEG (-16777216) #define CALL_DIST_MAX_POS (+16777214) #define JCC_DIST_SMALL_MAX_NEG (-256) #define JCC_DIST_SMALL_MAX_POS (+254) #define JCC_DIST_MEDIUM_MAX_NEG (-1048576) #define JCC_DIST_MEDIUM_MAX_POS (+1048574) #define LBL_SIZE_SMALL (2) #define JMP_SIZE_SMALL (2) #define JMP_SIZE_LARGE (4) #define JCC_SIZE_SMALL (2) #define JCC_SIZE_MEDIUM (4) #define JCC_SIZE_LARGE (6) // The first thing in an ARM32 prolog pushes LR to the stack, so this can be 0. #define STACK_PROBE_BOUNDARY_THRESHOLD_BYTES 0 #define REG_STACK_PROBE_HELPER_ARG REG_R4 #define RBM_STACK_PROBE_HELPER_ARG RBM_R4 #define REG_STACK_PROBE_HELPER_CALL_TARGET REG_R5 #define RBM_STACK_PROBE_HELPER_CALL_TARGET RBM_R5 #define RBM_STACK_PROBE_HELPER_TRASH (RBM_R5 | RBM_LR) // clang-format on
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #if !defined(TARGET_ARM) #error The file should not be included for this platform. #endif // clang-format off // TODO-ARM-CQ: Use shift for division by power of 2 // TODO-ARM-CQ: Check for sdiv/udiv at runtime and generate it if available #define USE_HELPERS_FOR_INT_DIV 1 // BeagleBoard (ARMv7A) doesn't support SDIV/UDIV #define CPU_LOAD_STORE_ARCH 1 #define ROUND_FLOAT 0 // Do not round intermediate float expression results #define CPU_HAS_BYTE_REGS 0 #define CPBLK_UNROLL_LIMIT 32 // Upper bound to let the code generator loop unroll CpBlk. #define INITBLK_UNROLL_LIMIT 16 // Upper bound to let the code generator loop unroll InitBlk. #define FEATURE_FIXED_OUT_ARGS 1 // Preallocate the outgoing arg area in the prolog #define FEATURE_STRUCTPROMOTE 1 // JIT Optimization to promote fields of structs into registers #define FEATURE_MULTIREG_STRUCT_PROMOTE 0 // True when we want to promote fields of a multireg struct into registers #define FEATURE_FASTTAILCALL 1 // Tail calls made as epilog+jmp #define FEATURE_TAILCALL_OPT 1 // opportunistic Tail calls (i.e. without ".tail" prefix) made as fast tail calls. #define FEATURE_SET_FLAGS 1 // Set to true to force the JIT to mark the trees with GTF_SET_FLAGS when the flags need to be set #define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register (including HFA support) #define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register (including passing HFAs) #define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register (including HFA returns) #define FEATURE_STRUCT_CLASSIFIER 0 // Uses a classifier function to determine if structs are passed/returned in more than one register #define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (double). #define MAX_PASS_MULTIREG_BYTES 32 // Maximum size of a struct that could be passed in more than one register (Max is an HFA of 4 doubles) #define MAX_RET_MULTIREG_BYTES 32 // Maximum size of a struct that could be returned in more than one register (Max is an HFA of 4 doubles) #define MAX_ARG_REG_COUNT 4 // Maximum registers used to pass a single argument in multiple registers. (max is 4 floats or doubles using an HFA) #define MAX_RET_REG_COUNT 4 // Maximum registers used to return a value. #define MAX_MULTIREG_COUNT 4 // Maximum number of registers defined by a single instruction (including calls). // This is also the maximum number of registers for a MultiReg node. #define NOGC_WRITE_BARRIERS 0 // We DO-NOT have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers #define USER_ARGS_COME_LAST 1 #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really // need to track stack depth, but this is currently necessary to get GC information reported at call sites. #define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses. #define FEATURE_EH_CALLFINALLY_THUNKS 0 // Generate call-to-finally code in "thunks" in the enclosing EH region, protected by "cloned finally" clauses.
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods #define CSE_CONSTS 1 // Enable if we want to CSE constants #define REG_FP_FIRST REG_F0 #define REG_FP_LAST REG_F31 #define FIRST_FP_ARGREG REG_F0 #define LAST_FP_ARGREG REG_F15 #define REGNUM_BITS 6 // number of bits in a REG_* #define REGSIZE_BYTES 4 // number of bytes in one register #define MIN_ARG_AREA_FOR_CALL 0 // Minimum required outgoing argument space for a call. #define CODE_ALIGN 2 // code alignment requirement #define STACK_ALIGN 8 // stack alignment requirement #define RBM_INT_CALLEE_SAVED (RBM_R4|RBM_R5|RBM_R6|RBM_R7|RBM_R8|RBM_R9|RBM_R10) #define RBM_INT_CALLEE_TRASH (RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R12|RBM_LR) #define RBM_FLT_CALLEE_SAVED (RBM_F16|RBM_F17|RBM_F18|RBM_F19|RBM_F20|RBM_F21|RBM_F22|RBM_F23|RBM_F24|RBM_F25|RBM_F26|RBM_F27|RBM_F28|RBM_F29|RBM_F30|RBM_F31) #define RBM_FLT_CALLEE_TRASH (RBM_F0|RBM_F1|RBM_F2|RBM_F3|RBM_F4|RBM_F5|RBM_F6|RBM_F7|RBM_F8|RBM_F9|RBM_F10|RBM_F11|RBM_F12|RBM_F13|RBM_F14|RBM_F15) #define RBM_CALLEE_SAVED (RBM_INT_CALLEE_SAVED | RBM_FLT_CALLEE_SAVED) #define RBM_CALLEE_TRASH (RBM_INT_CALLEE_TRASH | RBM_FLT_CALLEE_TRASH) #define REG_DEFAULT_HELPER_CALL_TARGET REG_R12 #define RBM_DEFAULT_HELPER_CALL_TARGET RBM_R12 #define RBM_ALLINT (RBM_INT_CALLEE_SAVED | RBM_INT_CALLEE_TRASH) #define RBM_ALLFLOAT (RBM_FLT_CALLEE_SAVED | RBM_FLT_CALLEE_TRASH) #define RBM_ALLDOUBLE (RBM_F0|RBM_F2|RBM_F4|RBM_F6|RBM_F8|RBM_F10|RBM_F12|RBM_F14|RBM_F16|RBM_F18|RBM_F20|RBM_F22|RBM_F24|RBM_F26|RBM_F28|RBM_F30) #define REG_VAR_ORDER REG_R3,REG_R2,REG_R1,REG_R0,REG_R4,REG_LR,REG_R12,\ REG_R5,REG_R6,REG_R7,REG_R8,REG_R9,REG_R10 #define REG_VAR_ORDER_FLT REG_F8, REG_F9, REG_F10, REG_F11, \ REG_F12, REG_F13, REG_F14, REG_F15, \ REG_F6, REG_F7, REG_F4, REG_F5, \ REG_F2, REG_F3, REG_F0, REG_F1, \ REG_F16, REG_F17, REG_F18, REG_F19, \ REG_F20, REG_F21, REG_F22, REG_F23, \ REG_F24, REG_F25, REG_F26, REG_F27, \ REG_F28, REG_F29, REG_F30, REG_F31, #define RBM_LOW_REGS (RBM_R0|RBM_R1|RBM_R2|RBM_R3|RBM_R4|RBM_R5|RBM_R6|RBM_R7) #define RBM_HIGH_REGS (RBM_R8|RBM_R9|RBM_R10|RBM_R11|RBM_R12|RBM_SP|RBM_LR|RBM_PC) #define REG_CALLEE_SAVED_ORDER REG_R4,REG_R5,REG_R6,REG_R7,REG_R8,REG_R9,REG_R10,REG_R11 #define RBM_CALLEE_SAVED_ORDER RBM_R4,RBM_R5,RBM_R6,RBM_R7,RBM_R8,RBM_R9,RBM_R10,RBM_R11 #define CNT_CALLEE_SAVED (8) #define CNT_CALLEE_TRASH (6) #define CNT_CALLEE_ENREG (CNT_CALLEE_SAVED-1) #define CNT_CALLEE_SAVED_FLOAT (16) #define CNT_CALLEE_TRASH_FLOAT (16) #define CALLEE_SAVED_REG_MAXSZ (CNT_CALLEE_SAVED*REGSIZE_BYTES) #define CALLEE_SAVED_FLOAT_MAXSZ (CNT_CALLEE_SAVED_FLOAT*sizeof(float)) // Temporary registers used for the GS cookie check. #define REG_GSCOOKIE_TMP_0 REG_R12 #define REG_GSCOOKIE_TMP_1 REG_LR // register to hold shift amount; no special register is required on the ARM #define REG_SHIFT REG_NA #define RBM_SHIFT RBM_ALLINT // register to hold shift amount when shifting 64-bit values (this uses a helper call) #define REG_SHIFT_LNG REG_R2 // REG_ARG_2 #define RBM_SHIFT_LNG RBM_R2 // RBM_ARG_2 // This is a general scratch register that does not conflict with the argument registers #define REG_SCRATCH REG_LR // This is a general register that can be optionally reserved for other purposes during codegen #define REG_OPT_RSVD REG_R10 #define RBM_OPT_RSVD RBM_R10 // We reserve R9 to store SP on entry for stack unwinding when localloc is used // This needs to stay in sync with the ARM version of InlinedCallFrame::UpdateRegDisplay code. 
#define REG_SAVED_LOCALLOC_SP REG_R9 #define RBM_SAVED_LOCALLOC_SP RBM_R9 // Where is the exception object on entry to the handler block? #define REG_EXCEPTION_OBJECT REG_R0 #define RBM_EXCEPTION_OBJECT RBM_R0 #define REG_JUMP_THUNK_PARAM REG_R12 #define RBM_JUMP_THUNK_PARAM RBM_R12 // ARM write barrier ABI (see vm\arm\asmhelpers.asm, vm\arm\asmhelpers.S): // CORINFO_HELP_ASSIGN_REF (JIT_WriteBarrier), CORINFO_HELP_CHECKED_ASSIGN_REF (JIT_CheckedWriteBarrier): // On entry: // r0: the destination address (LHS of the assignment) // r1: the object reference (RHS of the assignment) // On exit: // r0: trashed // r3: trashed // CORINFO_HELP_ASSIGN_BYREF (JIT_ByRefWriteBarrier): // On entry: // r0: the destination address (object reference written here) // r1: the source address (points to object reference to write) // On exit: // r0: incremented by 4 // r1: incremented by 4 // r2: trashed // r3: trashed #define REG_WRITE_BARRIER_DST_BYREF REG_ARG_0 #define RBM_WRITE_BARRIER_DST_BYREF RBM_ARG_0 #define REG_WRITE_BARRIER_SRC_BYREF REG_ARG_1 #define RBM_WRITE_BARRIER_SRC_BYREF RBM_ARG_1 #define RBM_CALLEE_TRASH_NOGC (RBM_R2|RBM_R3|RBM_LR|RBM_DEFAULT_HELPER_CALL_TARGET) // Registers killed by CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF. #define RBM_CALLEE_TRASH_WRITEBARRIER (RBM_R0|RBM_R3|RBM_LR|RBM_DEFAULT_HELPER_CALL_TARGET) // Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF. #define RBM_CALLEE_GCTRASH_WRITEBARRIER RBM_CALLEE_TRASH_WRITEBARRIER // Registers killed by CORINFO_HELP_ASSIGN_BYREF. #define RBM_CALLEE_TRASH_WRITEBARRIER_BYREF (RBM_WRITE_BARRIER_DST_BYREF | RBM_WRITE_BARRIER_SRC_BYREF | RBM_CALLEE_TRASH_NOGC) // Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_BYREF. // Note that r0 and r1 are still valid byref pointers after this helper call, despite their value being changed. #define RBM_CALLEE_GCTRASH_WRITEBARRIER_BYREF RBM_CALLEE_TRASH_NOGC // GenericPInvokeCalliHelper VASigCookie Parameter #define REG_PINVOKE_COOKIE_PARAM REG_R4 #define RBM_PINVOKE_COOKIE_PARAM RBM_R4 // GenericPInvokeCalliHelper unmanaged target Parameter #define REG_PINVOKE_TARGET_PARAM REG_R12 #define RBM_PINVOKE_TARGET_PARAM RBM_R12 // IL stub's secret MethodDesc parameter (JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM) #define REG_SECRET_STUB_PARAM REG_R12 #define RBM_SECRET_STUB_PARAM RBM_R12 // R2R indirect call. Use the same registers as VSD #define REG_R2R_INDIRECT_PARAM REG_R4 #define RBM_R2R_INDIRECT_PARAM RBM_R4 // JMP Indirect call register #define REG_INDIRECT_CALL_TARGET_REG REG_R12 // Registers used by PInvoke frame setup #define REG_PINVOKE_FRAME REG_R4 #define RBM_PINVOKE_FRAME RBM_R4 #define REG_PINVOKE_TCB REG_R5 #define RBM_PINVOKE_TCB RBM_R5 #define REG_PINVOKE_SCRATCH REG_R6 #define RBM_PINVOKE_SCRATCH RBM_R6 // The following defines are useful for iterating a regNumber #define REG_FIRST REG_R0 #define REG_INT_FIRST REG_R0 #define REG_INT_LAST REG_LR #define REG_INT_COUNT (REG_INT_LAST - REG_INT_FIRST + 1) #define REG_NEXT(reg) ((regNumber)((unsigned)(reg) + 1)) #define REG_PREV(reg) ((regNumber)((unsigned)(reg) - 1)) // The following registers are used in emitting Enter/Leave/Tailcall profiler callbacks #define REG_PROFILER_ENTER_ARG REG_R0 #define RBM_PROFILER_ENTER_ARG RBM_R0 #define REG_PROFILER_RET_SCRATCH REG_R2 #define RBM_PROFILER_RET_SCRATCH RBM_R2 // The registers trashed by profiler enter/leave/tailcall hook // See vm\arm\asmhelpers.asm for more details. 
#define RBM_PROFILER_ENTER_TRASH RBM_NONE // While REG_PROFILER_RET_SCRATCH is not trashed by the method, the register allocator must // consider it killed by the return. #define RBM_PROFILER_LEAVE_TRASH RBM_PROFILER_RET_SCRATCH #define RBM_PROFILER_TAILCALL_TRASH RBM_NONE // Which registers are int and long values returned in? #define REG_INTRET REG_R0 #define RBM_INTRET RBM_R0 #define RBM_LNGRET (RBM_R1|RBM_R0) #define REG_LNGRET_LO REG_R0 #define REG_LNGRET_HI REG_R1 #define RBM_LNGRET_LO RBM_R0 #define RBM_LNGRET_HI RBM_R1 #define REG_FLOATRET REG_F0 #define RBM_FLOATRET RBM_F0 #define RBM_DOUBLERET (RBM_F0|RBM_F1) // The registers trashed by the CORINFO_HELP_STOP_FOR_GC helper (JIT_RareDisableHelper). // See vm\arm\asmhelpers.asm for more details. #define RBM_STOP_FOR_GC_TRASH (RBM_CALLEE_TRASH & ~(RBM_LNGRET|RBM_R7|RBM_R8|RBM_R11|RBM_DOUBLERET|RBM_F2|RBM_F3|RBM_F4|RBM_F5|RBM_F6|RBM_F7)) // The registers trashed by the CORINFO_HELP_INIT_PINVOKE_FRAME helper. #define RBM_INIT_PINVOKE_FRAME_TRASH (RBM_CALLEE_TRASH | RBM_PINVOKE_TCB | RBM_PINVOKE_SCRATCH) #define RBM_VALIDATE_INDIRECT_CALL_TRASH (RBM_INT_CALLEE_TRASH) #define REG_VALIDATE_INDIRECT_CALL_ADDR REG_R0 #define REG_FPBASE REG_R11 #define RBM_FPBASE RBM_R11 #define STR_FPBASE "r11" #define REG_SPBASE REG_SP #define RBM_SPBASE RBM_SP #define STR_SPBASE "sp" #define FIRST_ARG_STACK_OFFS (2*REGSIZE_BYTES) // Caller's saved FP and return address #define MAX_REG_ARG 4 #define MAX_FLOAT_REG_ARG 16 #define MAX_HFA_RET_SLOTS 8 #define REG_ARG_FIRST REG_R0 #define REG_ARG_LAST REG_R3 #define REG_ARG_FP_FIRST REG_F0 #define REG_ARG_FP_LAST REG_F7 #define INIT_ARG_STACK_SLOT 0 // No outgoing reserved stack slots #define REG_ARG_0 REG_R0 #define REG_ARG_1 REG_R1 #define REG_ARG_2 REG_R2 #define REG_ARG_3 REG_R3 extern const regNumber intArgRegs [MAX_REG_ARG]; extern const regMaskTP intArgMasks[MAX_REG_ARG]; #define RBM_ARG_0 RBM_R0 #define RBM_ARG_1 RBM_R1 #define RBM_ARG_2 RBM_R2 #define RBM_ARG_3 RBM_R3 #define RBM_ARG_REGS (RBM_ARG_0|RBM_ARG_1|RBM_ARG_2|RBM_ARG_3) #define RBM_FLTARG_REGS (RBM_F0|RBM_F1|RBM_F2|RBM_F3|RBM_F4|RBM_F5|RBM_F6|RBM_F7|RBM_F8|RBM_F9|RBM_F10|RBM_F11|RBM_F12|RBM_F13|RBM_F14|RBM_F15) #define RBM_DBL_REGS RBM_ALLDOUBLE extern const regNumber fltArgRegs [MAX_FLOAT_REG_ARG]; extern const regMaskTP fltArgMasks[MAX_FLOAT_REG_ARG]; #define LBL_DIST_SMALL_MAX_NEG (0) #define LBL_DIST_SMALL_MAX_POS (+1020) #define LBL_DIST_MED_MAX_NEG (-4095) #define LBL_DIST_MED_MAX_POS (+4096) #define JMP_DIST_SMALL_MAX_NEG (-2048) #define JMP_DIST_SMALL_MAX_POS (+2046) #define CALL_DIST_MAX_NEG (-16777216) #define CALL_DIST_MAX_POS (+16777214) #define JCC_DIST_SMALL_MAX_NEG (-256) #define JCC_DIST_SMALL_MAX_POS (+254) #define JCC_DIST_MEDIUM_MAX_NEG (-1048576) #define JCC_DIST_MEDIUM_MAX_POS (+1048574) #define LBL_SIZE_SMALL (2) #define JMP_SIZE_SMALL (2) #define JMP_SIZE_LARGE (4) #define JCC_SIZE_SMALL (2) #define JCC_SIZE_MEDIUM (4) #define JCC_SIZE_LARGE (6) // The first thing in an ARM32 prolog pushes LR to the stack, so this can be 0. #define STACK_PROBE_BOUNDARY_THRESHOLD_BYTES 0 #define REG_STACK_PROBE_HELPER_ARG REG_R4 #define RBM_STACK_PROBE_HELPER_ARG RBM_R4 #define REG_STACK_PROBE_HELPER_CALL_TARGET REG_R5 #define RBM_STACK_PROBE_HELPER_CALL_TARGET RBM_R5 #define RBM_STACK_PROBE_HELPER_TRASH (RBM_R5 | RBM_LR) // clang-format on
1
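The targetarm.h change above only flips FEATURE_FASTTAILCALL and FEATURE_TAILCALL_OPT from 0 to 1; the three guards listed in the PR description are enforced elsewhere in the JIT. The following is a hedged sketch of what such an eligibility check could look like — CalleeInfo and canFastTailCall are hypothetical names, and the size comparison is a simplified stand-in for the real overwrite analysis:

    #include <cstddef>

    // Hypothetical summary of a prospective tail call (illustration only).
    struct CalleeInfo {
        bool   hasSplitStructArg;      // struct split between registers and stack
        bool   hasNonStandardCallConv; // e.g. extra cookie/target parameter registers
        size_t calleeStackArgBytes;    // stack argument space the callee expects
        size_t callerStackArgBytes;    // incoming argument space the caller can reuse
    };

    // A fast tail call reuses the caller's incoming argument area, so reject the
    // cases the PR description calls out.
    bool canFastTailCall(const CalleeInfo& c) {
        if (c.hasSplitStructArg)
            return false; // split struct arguments cannot be re-laid-out in place
        if (c.hasNonStandardCallConv)
            return false; // non-standard conventions need extra registers/slots
        if (c.calleeStackArgBytes > c.callerStackArgBytes)
            return false; // outgoing args would overwrite stack the callee reads
        return true;      // epilog+jmp is safe
    }

    int main() {
        CalleeInfo c{false, false, 8, 16};
        return canFastTailCall(c) ? 0 : 1;
    }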
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/tests/nativeaot/SmokeTests/PInvoke/PInvokeNative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdint.h> #ifdef TARGET_WINDOWS #include <windows.h> #include <wtypes.h> #define DLL_EXPORT extern "C" __declspec(dllexport) #else #include<errno.h> #define HANDLE size_t #define DLL_EXPORT extern "C" __attribute((visibility("default"))) #endif #ifndef TARGET_WINDOWS #define __stdcall #endif #ifndef TARGET_WINDOWS #define __cdecl #endif #if (_MSC_VER >= 1400) // Check MSC version #pragma warning(push) #pragma warning(disable: 4996) // Disable deprecation #endif void* MemAlloc(long bytes) { #ifdef TARGET_WINDOWS return (unsigned char *)CoTaskMemAlloc(bytes); #else return (unsigned char *)malloc(bytes); #endif } void MemFree(void *p) { #ifdef TARGET_WINDOWS CoTaskMemFree(p); #else free(p); #endif } DLL_EXPORT int __stdcall Square(int intValue) { return intValue * intValue; } DLL_EXPORT int __stdcall IsTrue(bool value) { if (value == true) return 1; return 0; } DLL_EXPORT int __stdcall CheckIncremental(int *array, int sz) { if (array == NULL) return 1; for (int i = 0; i < sz; i++) { if (array[i] != i) return 1; } return 0; } struct Foo { int a; float b; }; DLL_EXPORT int __stdcall CheckIncremental_Foo(Foo *array, int sz) { if (array == NULL) return 1; for (int i = 0; i < sz; i++) { if (array[i].a != i || array[i].b != i) return 1; } return 0; } DLL_EXPORT int __stdcall Inc(int *val) { if (val == NULL) return -1; *val = *val + 1; return 0; } DLL_EXPORT int __stdcall VerifyByRefFoo(Foo *val) { if (val->a != 10) return -1; if (val->b != 20) return -1; val->a++; val->b++; return 0; } DLL_EXPORT bool __stdcall GetNextChar(short *value) { if (value == NULL) return false; *value = *value + 1; return true; } int CompareAnsiString(const char *val, const char * expected) { return strcmp(val, expected) == 0 ? 
1 : 0; } int CompareUnicodeString(const unsigned short *val, const unsigned short *expected) { if (val == NULL && expected == NULL) return 1; if (val == NULL || expected == NULL) return 0; const unsigned short *p = val; const unsigned short *q = expected; while (*p && *q && *p == *q) { p++; q++; } return *p == 0 && *q == 0; } DLL_EXPORT int __stdcall VerifyAnsiString(char *val) { if (val == NULL) return 0; return CompareAnsiString(val, "Hello World"); } void CopyAnsiString(char *dst, const char *src) { if (src == NULL || dst == NULL) return; const char *q = src; char *p = dst; while (*q) { *p++ = *q++; } *p = '\0'; } DLL_EXPORT int __stdcall VerifyAnsiStringOut(char **val) { if (val == NULL) return 0; *val = (char*)MemAlloc(sizeof(char) * 12); CopyAnsiString(*val, "Hello World"); return 1; } DLL_EXPORT int __stdcall VerifyAnsiStringRef(char **val) { if (val == NULL) return 0; if (!CompareAnsiString(*val, "Hello World")) { MemFree(*val); return 0; } *val = (char*)MemAlloc(sizeof(char) * 13); CopyAnsiString(*val, "Hello World!"); return 1; } DLL_EXPORT int __stdcall VerifyAnsiStringArray(char **val) { if (val == NULL || *val == NULL) return 0; return CompareAnsiString(val[0], "Hello") && CompareAnsiString(val[1], "World"); } void ToUpper(char *val) { if (val == NULL) return; char *p = val; while (*p != '\0') { if (*p >= 'a' && *p <= 'z') { *p = *p - 'a' + 'A'; } p++; } } DLL_EXPORT void __stdcall ToUpper(char **val) { if (val == NULL) return; ToUpper(val[0]); ToUpper(val[1]); } DLL_EXPORT int __stdcall VerifyUnicodeString(unsigned short *val) { if (val == NULL) return 0; unsigned short expected[] = {'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0}; return CompareUnicodeString(val, expected); } DLL_EXPORT int __stdcall VerifyUnicodeStringOut(unsigned short **val) { if (val == NULL) return 0; unsigned short *p = (unsigned short *)MemAlloc(sizeof(unsigned short) * 12); unsigned short expected[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0 }; for (int i = 0; i < 12; i++) p[i] = expected[i]; *val = p; return 1; } DLL_EXPORT int __stdcall VerifyUnicodeStringRef(unsigned short **val) { if (val == NULL) return 0; unsigned short expected[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0}; unsigned short *p = expected; unsigned short *q = *val; if (!CompareUnicodeString(p, q)) return 0; MemFree(*val); p = (unsigned short*)MemAlloc(sizeof(unsigned short) * 13); int i; for (i = 0; i < 11; i++) p[i] = expected[i]; p[i++] = '!'; p[i] = '\0'; *val = p; return 1; } DLL_EXPORT bool __stdcall VerifySizeParamIndex(unsigned char ** arrByte, unsigned char *arrSize) { *arrSize = 10; *arrByte = (unsigned char *)MemAlloc(sizeof(unsigned char) * (*arrSize)); if (*arrByte == NULL) return false; for (int i = 0; i < *arrSize; i++) { (*arrByte)[i] = (unsigned char)i; } return true; } DLL_EXPORT bool __stdcall LastErrorTest() { int lasterror; #ifdef TARGET_WINDOWS lasterror = GetLastError(); SetLastError(12345); #else lasterror = errno; errno = 12345; #endif return lasterror == 0; } DLL_EXPORT void* __stdcall AllocateMemory(int bytes) { void *mem = malloc(bytes); return mem; } DLL_EXPORT bool __stdcall ReleaseMemory(void *mem) { free(mem); return true; } DLL_EXPORT bool __stdcall SafeHandleTest(HANDLE sh, long shValue) { return (long)((size_t)(sh)) == shValue; } DLL_EXPORT long __stdcall SafeHandleOutTest(HANDLE **sh) { *sh = (HANDLE *)malloc(100); return (long)((size_t)(*sh)); } DLL_EXPORT long __stdcall SafeHandleRefTest(HANDLE **sh, bool alloc) { if (alloc) *sh = (HANDLE 
*)malloc(100); return (long)((size_t)(*sh)); } DLL_EXPORT bool __stdcall ReversePInvoke_Int(int(__stdcall *fnPtr) (int, int, int, int, int, int, int, int, int, int)) { return fnPtr(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) == 55; } typedef bool(__stdcall *StringFuncPtr) (char *); DLL_EXPORT bool __stdcall ReversePInvoke_String(StringFuncPtr fnPtr) { char str[] = "Hello World"; return fnPtr(str); } struct DelegateFieldStruct { StringFuncPtr fnPtr; }; DLL_EXPORT bool __stdcall ReversePInvoke_DelegateField(DelegateFieldStruct p) { char str[] = "Hello World"; return p.fnPtr(str); } typedef bool(__stdcall *OutStringFuncPtr) (char **); DLL_EXPORT bool __stdcall ReversePInvoke_OutString(OutStringFuncPtr fnPtr) { char *pResult; fnPtr(&pResult); return strcmp(pResult, "Hello there!") == 0; } typedef bool(__stdcall *ArrayFuncPtr) (int *, size_t sz); DLL_EXPORT bool __stdcall ReversePInvoke_Array(ArrayFuncPtr fnPtr) { int a[42]; for (int i = 0; i < 42; i++) a[i] = i; return fnPtr(a, 42); } bool CheckString(char *str) { return CompareAnsiString(str, "Hello World!") == 1; } DLL_EXPORT StringFuncPtr __stdcall GetDelegate() { return CheckString; } DLL_EXPORT bool __stdcall Callback(StringFuncPtr *fnPtr) { char str[] = "Hello World"; if ((*fnPtr)(str) == false) return false; *fnPtr = CheckString; return true; } // returns // -1 if val is null // 1 if val is "Hello World" // 0 otherwise DLL_EXPORT int __stdcall VerifyUnicodeStringBuilder(unsigned short *val) { if (val == NULL) return -1; if (!VerifyUnicodeString(val)) return 0; for (int i = 0; val[i] != '\0'; i++) { if ((char)val[i] >= 'a' && (char)val[i] <= 'z') { val[i] += 'A' - 'a'; } } return 1; } DLL_EXPORT int __stdcall VerifyUnicodeStringBuilderOut(unsigned short *val) { if (val == NULL) return 0; unsigned short src[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0 }; for (int i = 0; i < 12; i++) val[i] = src[i]; return 1; } DLL_EXPORT int __stdcall VerifyAnsiStringBuilderOut(char *val) { if (val == NULL) return 0; CopyAnsiString(val, "Hello World!"); return 1; } // returns // -1 if val is null // 1 if val is "Hello World" // 0 otherwise DLL_EXPORT int __stdcall VerifyAnsiStringBuilder(char *val) { if (val == NULL) return -1; if (!VerifyAnsiString(val)) return 0; for (int i = 0; val[i] != '\0'; i++) { if (val[i] >= 'a' && val[i] <= 'z') { val[i] += 'A' - 'a'; } } return 1; } DLL_EXPORT int* __stdcall ReversePInvoke_Unused(void(__stdcall *fnPtr) (void)) { return 0; } struct NativeSequentialStruct { short s; int a; float b; char *str; }; struct NativeSequentialStruct2 { float a; int b; }; DLL_EXPORT bool __stdcall StructTest(NativeSequentialStruct nss) { if (nss.s != 100) return false; if (nss.a != 1) return false; if (nss.b != 10.0) return false; if (!CompareAnsiString(nss.str, "Hello")) return false; return true; } DLL_EXPORT bool __stdcall StructTest_Sequential2(NativeSequentialStruct2 nss) { if (nss.a != 10.0) return false; if (nss.b != 123) return false; return true; } DLL_EXPORT void __stdcall StructTest_ByRef(NativeSequentialStruct *nss) { nss->a++; nss->b++; char *p = nss->str; while (*p != '\0') { *p = *p + 1; p++; } } DLL_EXPORT void __stdcall StructTest_ByOut(NativeSequentialStruct *nss) { nss->s = 1; nss->a = 1; nss->b = 1.0; int arrSize = 7; char *p; p = (char *)MemAlloc(sizeof(char) * arrSize); for (int i = 0; i < arrSize; i++) { *(p + i) = i + '0'; } *(p + arrSize) = '\0'; nss->str = p; } DLL_EXPORT bool __stdcall StructTest_Array(NativeSequentialStruct *nss, int length) { if (nss == NULL) return false; char expected[16]; for (int i = 
0; i < 3; i++) { if (nss[i].s != 0) return false; if (nss[i].a != i) return false; if (nss[i].b != i*i) return false; sprintf(expected, "%d", i); if (CompareAnsiString(expected, nss[i].str) == 0) return false; } return true; } typedef struct { int a; int b; int c; short inlineArray[128]; char inlineString[11]; } inlineStruct; typedef struct { int a; unsigned short inlineString[11]; } inlineUnicodeStruct; DLL_EXPORT bool __stdcall InlineArrayTest(inlineStruct* p, inlineUnicodeStruct *q) { for (short i = 0; i < 128; i++) { if (p->inlineArray[i] != i) return false; p->inlineArray[i] = i + 1; } if (CompareAnsiString(p->inlineString, "Hello") != 1) return false; unsigned short expected[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 0 }; if (CompareUnicodeString(q->inlineString, expected) != 1) return false; q->inlineString[5] = p->inlineString[5] = ' '; q->inlineString[6] = p->inlineString[6] = 'W'; q->inlineString[7] = p->inlineString[7] = 'o'; q->inlineString[8] = p->inlineString[8] = 'r'; q->inlineString[9] = p->inlineString[9] = 'l'; q->inlineString[10] = p->inlineString[10] = 'd'; return true; } struct NativeExplicitStruct { int a; char padding1[8]; float b; char padding2[8]; char *str; }; DLL_EXPORT bool __stdcall StructTest_Explicit(NativeExplicitStruct nes) { if (nes.a != 100) return false; if (nes.b != 100.0) return false; if (!CompareAnsiString(nes.str, "Hello")) return false; return true; } struct NativeNestedStruct { int a; NativeExplicitStruct nes; }; DLL_EXPORT bool __stdcall StructTest_Nested(NativeNestedStruct nns) { if (nns.a != 100) return false; return StructTest_Explicit(nns.nes); } DLL_EXPORT bool __stdcall VerifyAnsiCharArrayIn(char *a) { return CompareAnsiString(a, "Hello World") == 1; } DLL_EXPORT bool __stdcall VerifyAnsiCharArrayOut(char *a) { if (a == NULL) return false; CopyAnsiString(a, "Hello World!"); return true; } DLL_EXPORT bool __stdcall IsNULL(void *a) { return a == NULL; } DLL_EXPORT void __cdecl SetLastErrorFunc(int errorCode) { #ifdef TARGET_WINDOWS SetLastError(errorCode); #else errno = errorCode; #endif } DLL_EXPORT void* __stdcall GetFunctionPointer() { return (void*)&SetLastErrorFunc; } typedef struct { int c; char inlineString[260]; } inlineString; DLL_EXPORT bool __stdcall InlineStringTest(inlineString* p) { CopyAnsiString(p->inlineString, "Hello World!"); return true; } struct Callbacks { int(__stdcall *callback0) (void); int(__stdcall *callback1) (void); int(__stdcall *callback2) (void); }; DLL_EXPORT bool __stdcall RegisterCallbacks(Callbacks *callbacks) { return callbacks->callback0() == 0 && callbacks->callback1() == 1 && callbacks->callback2() == 2; } DLL_EXPORT int __stdcall ValidateSuccessCall(int errorCode) { return errorCode; } DLL_EXPORT int __stdcall ValidateIntResult(int errorCode, int* result) { *result = 42; return errorCode; } #ifndef DECIMAL_NEG // defined in wtypes.h typedef struct tagDEC { uint16_t wReserved; union { struct { uint8_t scale; uint8_t sign; }; uint16_t signscale; }; uint32_t Hi32; union { struct { uint32_t Lo32; uint32_t Mid32; }; uint64_t Lo64; }; } DECIMAL; #endif DLL_EXPORT DECIMAL __stdcall DecimalTest(DECIMAL value) { DECIMAL zero; memset(&zero, 0, sizeof(DECIMAL)); if (value.Lo32 != 100) { return zero; } if (value.Mid32 != 101) { return zero; } if (value.Hi32 != 102) { return zero; } if (value.sign != 0) { return zero; } if (value.scale != 1) { return zero; } value.sign = 128; value.scale = 2; value.Lo32 = 99; value.Mid32 = 98; value.Hi32 = 97; return value; } #if (_MSC_VER >= 1400) // Check MSC version 
#pragma warning(pop) // Re-enable previous deprecations #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdint.h> #ifdef TARGET_WINDOWS #include <windows.h> #include <wtypes.h> #define DLL_EXPORT extern "C" __declspec(dllexport) #else #include<errno.h> #define HANDLE size_t #define DLL_EXPORT extern "C" __attribute((visibility("default"))) #endif #ifndef TARGET_WINDOWS #define __stdcall #endif #ifndef TARGET_WINDOWS #define __cdecl #endif #if (_MSC_VER >= 1400) // Check MSC version #pragma warning(push) #pragma warning(disable: 4996) // Disable deprecation #endif void* MemAlloc(long bytes) { #ifdef TARGET_WINDOWS return (unsigned char *)CoTaskMemAlloc(bytes); #else return (unsigned char *)malloc(bytes); #endif } void MemFree(void *p) { #ifdef TARGET_WINDOWS CoTaskMemFree(p); #else free(p); #endif } DLL_EXPORT int __stdcall Square(int intValue) { return intValue * intValue; } DLL_EXPORT int __stdcall IsTrue(bool value) { if (value == true) return 1; return 0; } DLL_EXPORT int __stdcall CheckIncremental(int *array, int sz) { if (array == NULL) return 1; for (int i = 0; i < sz; i++) { if (array[i] != i) return 1; } return 0; } struct Foo { int a; float b; }; DLL_EXPORT int __stdcall CheckIncremental_Foo(Foo *array, int sz) { if (array == NULL) return 1; for (int i = 0; i < sz; i++) { if (array[i].a != i || array[i].b != i) return 1; } return 0; } DLL_EXPORT int __stdcall Inc(int *val) { if (val == NULL) return -1; *val = *val + 1; return 0; } DLL_EXPORT int __stdcall VerifyByRefFoo(Foo *val) { if (val->a != 10) return -1; if (val->b != 20) return -1; val->a++; val->b++; return 0; } DLL_EXPORT bool __stdcall GetNextChar(short *value) { if (value == NULL) return false; *value = *value + 1; return true; } int CompareAnsiString(const char *val, const char * expected) { return strcmp(val, expected) == 0 ? 
1 : 0; } int CompareUnicodeString(const unsigned short *val, const unsigned short *expected) { if (val == NULL && expected == NULL) return 1; if (val == NULL || expected == NULL) return 0; const unsigned short *p = val; const unsigned short *q = expected; while (*p && *q && *p == *q) { p++; q++; } return *p == 0 && *q == 0; } DLL_EXPORT int __stdcall VerifyAnsiString(char *val) { if (val == NULL) return 0; return CompareAnsiString(val, "Hello World"); } void CopyAnsiString(char *dst, const char *src) { if (src == NULL || dst == NULL) return; const char *q = src; char *p = dst; while (*q) { *p++ = *q++; } *p = '\0'; } DLL_EXPORT int __stdcall VerifyAnsiStringOut(char **val) { if (val == NULL) return 0; *val = (char*)MemAlloc(sizeof(char) * 12); CopyAnsiString(*val, "Hello World"); return 1; } DLL_EXPORT int __stdcall VerifyAnsiStringRef(char **val) { if (val == NULL) return 0; if (!CompareAnsiString(*val, "Hello World")) { MemFree(*val); return 0; } *val = (char*)MemAlloc(sizeof(char) * 13); CopyAnsiString(*val, "Hello World!"); return 1; } DLL_EXPORT int __stdcall VerifyAnsiStringArray(char **val) { if (val == NULL || *val == NULL) return 0; return CompareAnsiString(val[0], "Hello") && CompareAnsiString(val[1], "World"); } void ToUpper(char *val) { if (val == NULL) return; char *p = val; while (*p != '\0') { if (*p >= 'a' && *p <= 'z') { *p = *p - 'a' + 'A'; } p++; } } DLL_EXPORT void __stdcall ToUpper(char **val) { if (val == NULL) return; ToUpper(val[0]); ToUpper(val[1]); } DLL_EXPORT int __stdcall VerifyUnicodeString(unsigned short *val) { if (val == NULL) return 0; unsigned short expected[] = {'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0}; return CompareUnicodeString(val, expected); } DLL_EXPORT int __stdcall VerifyUnicodeStringOut(unsigned short **val) { if (val == NULL) return 0; unsigned short *p = (unsigned short *)MemAlloc(sizeof(unsigned short) * 12); unsigned short expected[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0 }; for (int i = 0; i < 12; i++) p[i] = expected[i]; *val = p; return 1; } DLL_EXPORT int __stdcall VerifyUnicodeStringRef(unsigned short **val) { if (val == NULL) return 0; unsigned short expected[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0}; unsigned short *p = expected; unsigned short *q = *val; if (!CompareUnicodeString(p, q)) return 0; MemFree(*val); p = (unsigned short*)MemAlloc(sizeof(unsigned short) * 13); int i; for (i = 0; i < 11; i++) p[i] = expected[i]; p[i++] = '!'; p[i] = '\0'; *val = p; return 1; } DLL_EXPORT bool __stdcall VerifySizeParamIndex(unsigned char ** arrByte, unsigned char *arrSize) { *arrSize = 10; *arrByte = (unsigned char *)MemAlloc(sizeof(unsigned char) * (*arrSize)); if (*arrByte == NULL) return false; for (int i = 0; i < *arrSize; i++) { (*arrByte)[i] = (unsigned char)i; } return true; } DLL_EXPORT bool __stdcall LastErrorTest() { int lasterror; #ifdef TARGET_WINDOWS lasterror = GetLastError(); SetLastError(12345); #else lasterror = errno; errno = 12345; #endif return lasterror == 0; } DLL_EXPORT void* __stdcall AllocateMemory(int bytes) { void *mem = malloc(bytes); return mem; } DLL_EXPORT bool __stdcall ReleaseMemory(void *mem) { free(mem); return true; } DLL_EXPORT bool __stdcall SafeHandleTest(HANDLE sh, long shValue) { return (long)((size_t)(sh)) == shValue; } DLL_EXPORT long __stdcall SafeHandleOutTest(HANDLE **sh) { *sh = (HANDLE *)malloc(100); return (long)((size_t)(*sh)); } DLL_EXPORT long __stdcall SafeHandleRefTest(HANDLE **sh, bool alloc) { if (alloc) *sh = (HANDLE 
*)malloc(100); return (long)((size_t)(*sh)); } DLL_EXPORT bool __stdcall ReversePInvoke_Int(int(__stdcall *fnPtr) (int, int, int, int, int, int, int, int, int, int)) { return fnPtr(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) == 55; } typedef bool(__stdcall *StringFuncPtr) (char *); DLL_EXPORT bool __stdcall ReversePInvoke_String(StringFuncPtr fnPtr) { char str[] = "Hello World"; return fnPtr(str); } struct DelegateFieldStruct { StringFuncPtr fnPtr; }; DLL_EXPORT bool __stdcall ReversePInvoke_DelegateField(DelegateFieldStruct p) { char str[] = "Hello World"; return p.fnPtr(str); } typedef bool(__stdcall *OutStringFuncPtr) (char **); DLL_EXPORT bool __stdcall ReversePInvoke_OutString(OutStringFuncPtr fnPtr) { char *pResult; fnPtr(&pResult); return strcmp(pResult, "Hello there!") == 0; } typedef bool(__stdcall *ArrayFuncPtr) (int *, size_t sz); DLL_EXPORT bool __stdcall ReversePInvoke_Array(ArrayFuncPtr fnPtr) { int a[42]; for (int i = 0; i < 42; i++) a[i] = i; return fnPtr(a, 42); } bool CheckString(char *str) { return CompareAnsiString(str, "Hello World!") == 1; } DLL_EXPORT StringFuncPtr __stdcall GetDelegate() { return CheckString; } DLL_EXPORT bool __stdcall Callback(StringFuncPtr *fnPtr) { char str[] = "Hello World"; if ((*fnPtr)(str) == false) return false; *fnPtr = CheckString; return true; } // returns // -1 if val is null // 1 if val is "Hello World" // 0 otherwise DLL_EXPORT int __stdcall VerifyUnicodeStringBuilder(unsigned short *val) { if (val == NULL) return -1; if (!VerifyUnicodeString(val)) return 0; for (int i = 0; val[i] != '\0'; i++) { if ((char)val[i] >= 'a' && (char)val[i] <= 'z') { val[i] += 'A' - 'a'; } } return 1; } DLL_EXPORT int __stdcall VerifyUnicodeStringBuilderOut(unsigned short *val) { if (val == NULL) return 0; unsigned short src[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0 }; for (int i = 0; i < 12; i++) val[i] = src[i]; return 1; } DLL_EXPORT int __stdcall VerifyAnsiStringBuilderOut(char *val) { if (val == NULL) return 0; CopyAnsiString(val, "Hello World!"); return 1; } // returns // -1 if val is null // 1 if val is "Hello World" // 0 otherwise DLL_EXPORT int __stdcall VerifyAnsiStringBuilder(char *val) { if (val == NULL) return -1; if (!VerifyAnsiString(val)) return 0; for (int i = 0; val[i] != '\0'; i++) { if (val[i] >= 'a' && val[i] <= 'z') { val[i] += 'A' - 'a'; } } return 1; } DLL_EXPORT int* __stdcall ReversePInvoke_Unused(void(__stdcall *fnPtr) (void)) { return 0; } struct NativeSequentialStruct { short s; int a; float b; char *str; }; struct NativeSequentialStruct2 { float a; int b; }; DLL_EXPORT bool __stdcall StructTest(NativeSequentialStruct nss) { if (nss.s != 100) return false; if (nss.a != 1) return false; if (nss.b != 10.0) return false; if (!CompareAnsiString(nss.str, "Hello")) return false; return true; } DLL_EXPORT bool __stdcall StructTest_Sequential2(NativeSequentialStruct2 nss) { if (nss.a != 10.0) return false; if (nss.b != 123) return false; return true; } DLL_EXPORT void __stdcall StructTest_ByRef(NativeSequentialStruct *nss) { nss->a++; nss->b++; char *p = nss->str; while (*p != '\0') { *p = *p + 1; p++; } } DLL_EXPORT void __stdcall StructTest_ByOut(NativeSequentialStruct *nss) { nss->s = 1; nss->a = 1; nss->b = 1.0; int arrSize = 7; char *p; p = (char *)MemAlloc(sizeof(char) * arrSize); for (int i = 0; i < arrSize; i++) { *(p + i) = i + '0'; } *(p + arrSize) = '\0'; nss->str = p; } DLL_EXPORT bool __stdcall StructTest_Array(NativeSequentialStruct *nss, int length) { if (nss == NULL) return false; char expected[16]; for (int i = 
0; i < 3; i++) { if (nss[i].s != 0) return false; if (nss[i].a != i) return false; if (nss[i].b != i*i) return false; sprintf(expected, "%d", i); if (CompareAnsiString(expected, nss[i].str) == 0) return false; } return true; } typedef struct { int a; int b; int c; short inlineArray[128]; char inlineString[11]; } inlineStruct; typedef struct { int a; unsigned short inlineString[11]; } inlineUnicodeStruct; DLL_EXPORT bool __stdcall InlineArrayTest(inlineStruct* p, inlineUnicodeStruct *q) { for (short i = 0; i < 128; i++) { if (p->inlineArray[i] != i) return false; p->inlineArray[i] = i + 1; } if (CompareAnsiString(p->inlineString, "Hello") != 1) return false; unsigned short expected[] = { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 0 }; if (CompareUnicodeString(q->inlineString, expected) != 1) return false; q->inlineString[5] = p->inlineString[5] = ' '; q->inlineString[6] = p->inlineString[6] = 'W'; q->inlineString[7] = p->inlineString[7] = 'o'; q->inlineString[8] = p->inlineString[8] = 'r'; q->inlineString[9] = p->inlineString[9] = 'l'; q->inlineString[10] = p->inlineString[10] = 'd'; return true; } struct NativeExplicitStruct { int a; char padding1[8]; float b; char padding2[8]; char *str; }; DLL_EXPORT bool __stdcall StructTest_Explicit(NativeExplicitStruct nes) { if (nes.a != 100) return false; if (nes.b != 100.0) return false; if (!CompareAnsiString(nes.str, "Hello")) return false; return true; } struct NativeNestedStruct { int a; NativeExplicitStruct nes; }; DLL_EXPORT bool __stdcall StructTest_Nested(NativeNestedStruct nns) { if (nns.a != 100) return false; return StructTest_Explicit(nns.nes); } DLL_EXPORT bool __stdcall VerifyAnsiCharArrayIn(char *a) { return CompareAnsiString(a, "Hello World") == 1; } DLL_EXPORT bool __stdcall VerifyAnsiCharArrayOut(char *a) { if (a == NULL) return false; CopyAnsiString(a, "Hello World!"); return true; } DLL_EXPORT bool __stdcall IsNULL(void *a) { return a == NULL; } DLL_EXPORT void __cdecl SetLastErrorFunc(int errorCode) { #ifdef TARGET_WINDOWS SetLastError(errorCode); #else errno = errorCode; #endif } DLL_EXPORT void* __stdcall GetFunctionPointer() { return (void*)&SetLastErrorFunc; } typedef struct { int c; char inlineString[260]; } inlineString; DLL_EXPORT bool __stdcall InlineStringTest(inlineString* p) { CopyAnsiString(p->inlineString, "Hello World!"); return true; } struct Callbacks { int(__stdcall *callback0) (void); int(__stdcall *callback1) (void); int(__stdcall *callback2) (void); }; DLL_EXPORT bool __stdcall RegisterCallbacks(Callbacks *callbacks) { return callbacks->callback0() == 0 && callbacks->callback1() == 1 && callbacks->callback2() == 2; } DLL_EXPORT int __stdcall ValidateSuccessCall(int errorCode) { return errorCode; } DLL_EXPORT int __stdcall ValidateIntResult(int errorCode, int* result) { *result = 42; return errorCode; } #ifndef DECIMAL_NEG // defined in wtypes.h typedef struct tagDEC { uint16_t wReserved; union { struct { uint8_t scale; uint8_t sign; }; uint16_t signscale; }; uint32_t Hi32; union { struct { uint32_t Lo32; uint32_t Mid32; }; uint64_t Lo64; }; } DECIMAL; #endif DLL_EXPORT DECIMAL __stdcall DecimalTest(DECIMAL value) { DECIMAL zero; memset(&zero, 0, sizeof(DECIMAL)); if (value.Lo32 != 100) { return zero; } if (value.Mid32 != 101) { return zero; } if (value.Hi32 != 102) { return zero; } if (value.sign != 0) { return zero; } if (value.scale != 1) { return zero; } value.sign = 128; value.scale = 2; value.Lo32 = 99; value.Mid32 = 98; value.Hi32 = 97; return value; } #if (_MSC_VER >= 1400) // Check MSC version 
#pragma warning(pop) // Re-enable previous deprecations #endif
-1
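PInvokeNative.cpp above is unchanged by this PR (label -1); it is the native side of a smoke test that receives managed delegates as raw function pointers. A self-contained C++ sketch of the callback shape ReversePInvoke_Int exercises, where SumTen merely stands in for the managed delegate:

    #include <cstdio>

    // Ten-int callback, matching the exported test's function-pointer type.
    typedef int (*TenIntFuncPtr)(int, int, int, int, int, int, int, int, int, int);

    static int SumTen(int a, int b, int c, int d, int e,
                      int f, int g, int h, int i, int j) {
        return a + b + c + d + e + f + g + h + i + j;
    }

    static bool ReversePInvoke_Int_Model(TenIntFuncPtr fnPtr) {
        return fnPtr(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) == 55; // 1+2+...+10
    }

    int main() {
        std::printf("callback ok: %d\n", ReversePInvoke_Int_Model(SumTen) ? 1 : 0);
        return 0;
    }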
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/mono/mono/utils/mono-coop-semaphore.h
/** * \file */ #ifndef __MONO_COOP_SEMAPHORE_H__ #define __MONO_COOP_SEMAPHORE_H__ #include <config.h> #include <glib.h> #include "mono-os-semaphore.h" #include "mono-threads-api.h" /* We put the OS sync primitives in a struct, so the compiler will warn us if * we use mono_os_(mutex|cond|sem)_... on MonoCoop(Mutex|Cond|Sem) structures */ typedef struct _MonoCoopSem MonoCoopSem; struct _MonoCoopSem { MonoSemType s; }; static inline void mono_coop_sem_init (MonoCoopSem *sem, int value) { mono_os_sem_init (&sem->s, value); } static inline void mono_coop_sem_destroy (MonoCoopSem *sem) { mono_os_sem_destroy (&sem->s); } static inline gint mono_coop_sem_wait (MonoCoopSem *sem, MonoSemFlags flags) { gint res; MONO_ENTER_GC_SAFE; res = mono_os_sem_wait (&sem->s, flags); MONO_EXIT_GC_SAFE; return res; } static inline MonoSemTimedwaitRet mono_coop_sem_timedwait (MonoCoopSem *sem, guint timeout_ms, MonoSemFlags flags) { MonoSemTimedwaitRet res; MONO_ENTER_GC_SAFE; res = mono_os_sem_timedwait (&sem->s, timeout_ms, flags); MONO_EXIT_GC_SAFE; return res; } static inline void mono_coop_sem_post (MonoCoopSem *sem) { mono_os_sem_post (&sem->s); } #endif /* __MONO_COOP_SEMAPHORE_H__ */
/** * \file */ #ifndef __MONO_COOP_SEMAPHORE_H__ #define __MONO_COOP_SEMAPHORE_H__ #include <config.h> #include <glib.h> #include "mono-os-semaphore.h" #include "mono-threads-api.h" /* We put the OS sync primitives in a struct, so the compiler will warn us if * we use mono_os_(mutex|cond|sem)_... on MonoCoop(Mutex|Cond|Sem) structures */ typedef struct _MonoCoopSem MonoCoopSem; struct _MonoCoopSem { MonoSemType s; }; static inline void mono_coop_sem_init (MonoCoopSem *sem, int value) { mono_os_sem_init (&sem->s, value); } static inline void mono_coop_sem_destroy (MonoCoopSem *sem) { mono_os_sem_destroy (&sem->s); } static inline gint mono_coop_sem_wait (MonoCoopSem *sem, MonoSemFlags flags) { gint res; MONO_ENTER_GC_SAFE; res = mono_os_sem_wait (&sem->s, flags); MONO_EXIT_GC_SAFE; return res; } static inline MonoSemTimedwaitRet mono_coop_sem_timedwait (MonoCoopSem *sem, guint timeout_ms, MonoSemFlags flags) { MonoSemTimedwaitRet res; MONO_ENTER_GC_SAFE; res = mono_os_sem_timedwait (&sem->s, timeout_ms, flags); MONO_EXIT_GC_SAFE; return res; } static inline void mono_coop_sem_post (MonoCoopSem *sem) { mono_os_sem_post (&sem->s); } #endif /* __MONO_COOP_SEMAPHORE_H__ */
-1
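The value of the MonoCoopSem wrapper above is that every blocking wait is bracketed by GC-safe transitions, and the struct wrapper makes it a compile error to call the raw mono_os_sem_* functions on a coop semaphore. A rough POSIX analogue of the same pattern, with no-op macros standing in for MONO_ENTER_GC_SAFE/MONO_EXIT_GC_SAFE (purely illustrative, not Mono's code):

#include <semaphore.h>
#include <stdio.h>

/* Stand-ins for the runtime's GC-state transition macros. */
#define ENTER_BLOCKING() /* tell the GC this thread may block   */
#define EXIT_BLOCKING()  /* tell the GC this thread runs again  */

typedef struct { sem_t s; } coop_sem;

static void coop_sem_init(coop_sem *sem, int value) { sem_init(&sem->s, 0, value); }

static int coop_sem_wait(coop_sem *sem)
{
    int res;
    ENTER_BLOCKING();
    res = sem_wait(&sem->s);   /* the only place the OS primitive is touched */
    EXIT_BLOCKING();
    return res;
}

static void coop_sem_post(coop_sem *sem) { sem_post(&sem->s); }

int main(void)
{
    coop_sem sem;
    coop_sem_init(&sem, 1);
    coop_sem_wait(&sem);
    printf("acquired\n");
    coop_sem_post(&sem);
    return 0;
}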
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/native/public/mono/metadata/details/environment-functions.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION #ifndef MONO_API_FUNCTION #error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header" #endif MONO_API_FUNCTION(int32_t, mono_environment_exitcode_get, (void)) MONO_API_FUNCTION(void, mono_environment_exitcode_set, (int32_t value))
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This file does not have ifdef guards, it is meant to be included multiple times with different definitions of MONO_API_FUNCTION #ifndef MONO_API_FUNCTION #error "MONO_API_FUNCTION(ret,name,args) macro not defined before including function declaration header" #endif MONO_API_FUNCTION(int32_t, mono_environment_exitcode_get, (void)) MONO_API_FUNCTION(void, mono_environment_exitcode_set, (int32_t value))
-1
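The header above relies on the X-macro idiom: the includer defines MONO_API_FUNCTION before inclusion, so one declaration list can expand to prototypes, function-pointer tables, or exported-name strings. A self-contained sketch of the idiom with a made-up two-entry list (generic names, not Mono's actual headers):

#include <stdio.h>

/* In the real pattern this list would live in its own header and be
 * #included repeatedly; it is inlined here so the example compiles alone. */
#define FUNCTION_LIST \
    API_FUNCTION(int,  get_answer, (void)) \
    API_FUNCTION(void, greet,      (const char *name))

/* Expansion 1: prototypes. */
#define API_FUNCTION(ret, name, args) ret name args;
FUNCTION_LIST
#undef API_FUNCTION

/* Expansion 2: a table of exported names. */
#define API_FUNCTION(ret, name, args) #name,
static const char *exported_names[] = { FUNCTION_LIST };
#undef API_FUNCTION

int get_answer(void) { return 42; }
void greet(const char *name) { printf("hello, %s\n", name); }

int main(void)
{
    for (unsigned i = 0; i < sizeof(exported_names) / sizeof(exported_names[0]); i++)
        printf("%s\n", exported_names[i]);
    return get_answer() == 42 ? 0 : 1;
}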
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/native/corehost/test/nativehost/error_writer_redirector.cpp
#include "error_writer_redirector.h" #include "hostpolicy.h" namespace { thread_local static const pal::char_t* g_prefix = nullptr; pal::stringstream_t& get_redirected_error_stream() { thread_local static pal::stringstream_t error_output; return error_output; } void HOSTPOLICY_CALLTYPE error_writer(const pal::char_t* message) { if (g_prefix != nullptr) get_redirected_error_stream() << g_prefix; get_redirected_error_stream() << message; } } error_writer_redirector::error_writer_redirector(set_error_writer_fn set_error_writer, const pal::char_t* prefix) : _set_error_writer(set_error_writer) { g_prefix = prefix; get_redirected_error_stream().clear(); _previous_writer = _set_error_writer(error_writer); } error_writer_redirector::~error_writer_redirector() { _set_error_writer(_previous_writer); } bool error_writer_redirector::has_errors() { return get_redirected_error_stream().tellp() != std::streampos(0); } const pal::string_t error_writer_redirector::get_errors() { return get_redirected_error_stream().str(); }
#include "error_writer_redirector.h" #include "hostpolicy.h" namespace { thread_local static const pal::char_t* g_prefix = nullptr; pal::stringstream_t& get_redirected_error_stream() { thread_local static pal::stringstream_t error_output; return error_output; } void HOSTPOLICY_CALLTYPE error_writer(const pal::char_t* message) { if (g_prefix != nullptr) get_redirected_error_stream() << g_prefix; get_redirected_error_stream() << message; } } error_writer_redirector::error_writer_redirector(set_error_writer_fn set_error_writer, const pal::char_t* prefix) : _set_error_writer(set_error_writer) { g_prefix = prefix; get_redirected_error_stream().clear(); _previous_writer = _set_error_writer(error_writer); } error_writer_redirector::~error_writer_redirector() { _set_error_writer(_previous_writer); } bool error_writer_redirector::has_errors() { return get_redirected_error_stream().tellp() != std::streampos(0); } const pal::string_t error_writer_redirector::get_errors() { return get_redirected_error_stream().str(); }
-1
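error_writer_redirector above is a small RAII guard: the constructor installs a capturing writer and remembers the previous one, and the destructor restores it even on early return or exception. A self-contained C++ sketch of the same pattern with generic names (not the hostpolicy API):

#include <iostream>
#include <sstream>
#include <string>

using error_writer_fn = void (*)(const char *);

namespace {
    error_writer_fn g_writer = nullptr;
    thread_local std::ostringstream t_captured;

    void capturing_writer(const char *message) { t_captured << message; }

    // Stand-in for set_error_writer: installs a writer, returns the previous one.
    error_writer_fn set_error_writer(error_writer_fn writer)
    {
        error_writer_fn previous = g_writer;
        g_writer = writer;
        return previous;
    }

    void report_error(const char *message)
    {
        if (g_writer) g_writer(message); else std::cerr << message;
    }
}

class error_capture
{
    error_writer_fn _previous;
public:
    error_capture() : _previous(set_error_writer(capturing_writer)) { t_captured.str(""); }
    ~error_capture() { set_error_writer(_previous); }   // restore on scope exit
    std::string text() const { return t_captured.str(); }
};

int main()
{
    {
        error_capture capture;
        report_error("something went wrong");
        std::cout << "captured: " << capture.text() << "\n";
    }
    report_error("back to stderr\n");   // guard is gone, default path again
    return 0;
}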
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/nativeaot/Runtime/SyncClean.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "CommonTypes.h" #include "CommonMacros.h" #include "daccess.h" #include "PalRedhawkCommon.h" #include "PalRedhawk.h" #include "rhassert.h" #include "slist.h" #include "holder.h" #include "SpinLock.h" #include "rhbinder.h" #include "CachedInterfaceDispatch.h" #include "SyncClean.hpp" void SyncClean::Terminate() { CleanUp(); } void SyncClean::CleanUp () { #ifdef FEATURE_CACHED_INTERFACE_DISPATCH // Update any interface dispatch caches that were unsafe to modify outside of this GC. ReclaimUnusedInterfaceDispatchCaches(); #endif }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "CommonTypes.h" #include "CommonMacros.h" #include "daccess.h" #include "PalRedhawkCommon.h" #include "PalRedhawk.h" #include "rhassert.h" #include "slist.h" #include "holder.h" #include "SpinLock.h" #include "rhbinder.h" #include "CachedInterfaceDispatch.h" #include "SyncClean.hpp" void SyncClean::Terminate() { CleanUp(); } void SyncClean::CleanUp () { #ifdef FEATURE_CACHED_INTERFACE_DISPATCH // Update any interface dispatch caches that were unsafe to modify outside of this GC. ReclaimUnusedInterfaceDispatchCaches(); #endif }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/tests/profiler/native/rejitprofiler/sigparse.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __PROFILER_SIGNATURE_PARSER__ #define __PROFILER_SIGNATURE_PARSER__ /* Sig ::= MethodDefSig | MethodRefSig | StandAloneMethodSig | FieldSig | PropertySig | LocalVarSig MethodDefSig ::= [[HASTHIS] [EXPLICITTHIS]] (DEFAULT|VARARG|GENERIC GenParamCount) ParamCount RetType Param* MethodRefSig ::= [[HASTHIS] [EXPLICITTHIS]] VARARG ParamCount RetType Param* [SENTINEL Param+] StandAloneMethodSig ::= [[HASTHIS] [EXPLICITTHIS]] (DEFAULT|VARARG|C|STDCALL|THISCALL|FASTCALL) ParamCount RetType Param* [SENTINEL Param+] FieldSig ::= FIELD CustomMod* Type PropertySig ::= PROPERTY [HASTHIS] ParamCount CustomMod* Type Param* LocalVarSig ::= LOCAL_SIG Count (TYPEDBYREF | ([CustomMod] [Constraint])* [BYREF] Type)+ ------------- CustomMod ::= ( CMOD_OPT | CMOD_REQD ) ( TypeDefEncoded | TypeRefEncoded ) Constraint ::= #define ELEMENT_TYPE_PINNED Param ::= CustomMod* ( TYPEDBYREF | [BYREF] Type ) RetType ::= CustomMod* ( VOID | TYPEDBYREF | [BYREF] Type ) Type ::= ( BOOLEAN | CHAR | I1 | U1 | U2 | U2 | I4 | U4 | I8 | U8 | R4 | R8 | I | U | | VALUETYPE TypeDefOrRefEncoded | CLASS TypeDefOrRefEncoded | STRING | OBJECT | PTR CustomMod* VOID | PTR CustomMod* Type | FNPTR MethodDefSig | FNPTR MethodRefSig | ARRAY Type ArrayShape | SZARRAY CustomMod* Type | GENERICINST (CLASS | VALUETYPE) TypeDefOrRefEncoded GenArgCount Type* | VAR Number | MVAR Number ArrayShape ::= Rank NumSizes Size* NumLoBounds LoBound* TypeDefOrRefEncoded ::= TypeDefEncoded | TypeRefEncoded TypeDefEncoded ::= 32-bit-3-part-encoding-for-typedefs-and-typerefs TypeRefEncoded ::= 32-bit-3-part-encoding-for-typedefs-and-typerefs ParamCount ::= 29-bit-encoded-integer GenArgCount ::= 29-bit-encoded-integer Count ::= 29-bit-encoded-integer Rank ::= 29-bit-encoded-integer NumSizes ::= 29-bit-encoded-integer Size ::= 29-bit-encoded-integer NumLoBounds ::= 29-bit-encoded-integer LoBounds ::= 29-bit-encoded-integer Number ::= 29-bit-encoded-integer */ #define ELEMENT_TYPE_END 0x00 //Marks end of a list #define ELEMENT_TYPE_VOID 0x01 #define ELEMENT_TYPE_BOOLEAN 0x02 #define ELEMENT_TYPE_CHAR 0x03 #define ELEMENT_TYPE_I1 0x04 #define ELEMENT_TYPE_U1 0x05 #define ELEMENT_TYPE_I2 0x06 #define ELEMENT_TYPE_U2 0x07 #define ELEMENT_TYPE_I4 0x08 #define ELEMENT_TYPE_U4 0x09 #define ELEMENT_TYPE_I8 0x0a #define ELEMENT_TYPE_U8 0x0b #define ELEMENT_TYPE_R4 0x0c #define ELEMENT_TYPE_R8 0x0d #define ELEMENT_TYPE_STRING 0x0e #define ELEMENT_TYPE_PTR 0x0f // Followed by type #define ELEMENT_TYPE_BYREF 0x10 // Followed by type #define ELEMENT_TYPE_VALUETYPE 0x11 // Followed by TypeDef or TypeRef token #define ELEMENT_TYPE_CLASS 0x12 // Followed by TypeDef or TypeRef token #define ELEMENT_TYPE_VAR 0x13 // Generic parameter in a generic type definition, represented as number #define ELEMENT_TYPE_ARRAY 0x14 // type rank boundsCount bound1 ... loCount lo1 ... #define ELEMENT_TYPE_GENERICINST 0x15 // Generic type instantiation. Followed by type type-arg-count type-1 ... 
type-n #define ELEMENT_TYPE_TYPEDBYREF 0x16 #define ELEMENT_TYPE_I 0x18 // System.IntPtr #define ELEMENT_TYPE_U 0x19 // System.UIntPtr #define ELEMENT_TYPE_FNPTR 0x1b // Followed by full method signature #define ELEMENT_TYPE_OBJECT 0x1c // System.Object #define ELEMENT_TYPE_SZARRAY 0x1d // Single-dim array with 0 lower bound #define ELEMENT_TYPE_MVAR 0x1e // Generic parameter in a generic method definition,represented as number #define ELEMENT_TYPE_CMOD_REQD 0x1f // Required modifier : followed by a TypeDef or TypeRef token #define ELEMENT_TYPE_CMOD_OPT 0x20 // Optional modifier : followed by a TypeDef or TypeRef token #define ELEMENT_TYPE_INTERNAL 0x21 // Implemented within the CLI #define ELEMENT_TYPE_MODIFIER 0x40 // Or'd with following element types #define ELEMENT_TYPE_SENTINEL 0x41 // Sentinel for vararg method signature #define ELEMENT_TYPE_PINNED 0x45 // Denotes a local variable that points at a pinned object #define SIG_METHOD_DEFAULT 0x00 // default calling convention #define SIG_METHOD_C 0x01 // C calling convention #define SIG_METHOD_STDCALL 0x02 // Stdcall calling convention #define SIG_METHOD_THISCALL 0x03 // thiscall calling convention #define SIG_METHOD_FASTCALL 0x04 // fastcall calling convention #define SIG_METHOD_VARARG 0x05 // vararg calling convention #define SIG_FIELD 0x06 // encodes a field #define SIG_LOCAL_SIG 0x07 // used for the .locals directive #define SIG_PROPERTY 0x08 // used to encode a property #define SIG_GENERIC 0x10 // used to indicate that the method has one or more generic parameters. #define SIG_HASTHIS 0x20 // used to encode the keyword instance in the calling convention #define SIG_EXPLICITTHIS 0x40 // used to encode the keyword explicit in the calling convention #define SIG_INDEX_TYPE_TYPEDEF 0x00 // ParseTypeDefOrRefEncoded returns this as the out index type for typedefs #define SIG_INDEX_TYPE_TYPEREF 0x01 // ParseTypeDefOrRefEncoded returns this as the out index type for typerefs #define SIG_INDEX_TYPE_TYPESPEC 0x02 // ParseTypeDefOrRefEncoded returns this as the out index type for typespecs typedef unsigned char sig_byte; typedef unsigned char sig_elem_type; typedef unsigned char sig_index_type; typedef unsigned int sig_index; typedef unsigned int sig_count; typedef unsigned int sig_mem_number; class SigParser { private: sig_byte *pbBase; sig_byte *pbCur; sig_byte *pbEnd; public: bool Parse(sig_byte *blob, sig_count len); private: bool ParseByte(sig_byte *pbOut); bool ParseNumber(sig_count *pOut); bool ParseTypeDefOrRefEncoded(sig_index_type *pOutIndexType, sig_index *pOutIndex); bool ParseMethod(sig_elem_type); bool ParseField(sig_elem_type); bool ParseProperty(sig_elem_type); bool ParseLocals(sig_elem_type); bool ParseLocal(); bool ParseOptionalCustomMods(); bool ParseOptionalCustomModsOrConstraint(); bool ParseCustomMod(); bool ParseRetType(); bool ParseType(); bool ParseParam(); bool ParseArrayShape(); protected: // subtype these methods to create your parser side-effects //---------------------------------------------------- // a method with given elem_type virtual void NotifyBeginMethod(sig_elem_type elem_type) = 0; virtual void NotifyEndMethod() = 0; // the method has a this pointer virtual void NotifyHasThis() = 0; // total parameters for the method virtual void NotifyParamCount(sig_count) = 0; // starting a return type virtual void NotifyBeginRetType() = 0; virtual void NotifyEndRetType() = 0; // starting a parameter virtual void NotifyBeginParam() = 0; virtual void NotifyEndParam() = 0; // sentinel indication the location of the "..." 
in the method signature virtual void NotifySentinal() = 0; // number of generic parameters in this method signature (if any) virtual void NotifyGenericParamCount(sig_count) = 0; //---------------------------------------------------- // a field with given elem_type virtual void NotifyBeginField(sig_elem_type elem_type) = 0; virtual void NotifyEndField() = 0; //---------------------------------------------------- // a block of locals with given elem_type (always just LOCAL_SIG for now) virtual void NotifyBeginLocals(sig_elem_type elem_type) = 0; virtual void NotifyEndLocals() = 0; // count of locals with a block virtual void NotifyLocalsCount(sig_count) = 0; // starting a new local within a local block virtual void NotifyBeginLocal() = 0; virtual void NotifyEndLocal() = 0; // the only constraint available to locals at the moment is ELEMENT_TYPE_PINNED virtual void NotifyConstraint(sig_elem_type elem_type) = 0; //---------------------------------------------------- // a property with given element type virtual void NotifyBeginProperty(sig_elem_type elem_type) = 0; virtual void NotifyEndProperty() = 0; //---------------------------------------------------- // starting array shape information for array types virtual void NotifyBeginArrayShape() = 0; virtual void NotifyEndArrayShape() = 0; // array rank (total number of dimensions) virtual void NotifyRank(sig_count) = 0; // number of dimensions with specified sizes followed by the size of each virtual void NotifyNumSizes(sig_count) = 0; virtual void NotifySize(sig_count) = 0; // BUG BUG lower bounds can be negative, how can this be encoded? // number of dimensions with specified lower bounds followed by lower bound of each virtual void NotifyNumLoBounds(sig_count) = 0; virtual void NotifyLoBound(sig_count) = 0; //---------------------------------------------------- // starting a normal type (occurs in many contexts such as param, field, local, etc) virtual void NotifyBeginType() = 0; virtual void NotifyEndType() = 0; virtual void NotifyTypedByref() = 0; // the type has the 'byref' modifier on it -- this normally proceeds the type definition in the context // the type is used, so for instance a parameter might have the byref modifier on it // so this happens before the BeginType in that context virtual void NotifyByref() = 0; // the type is "VOID" (this has limited uses, function returns and void pointer) virtual void NotifyVoid() = 0; // the type has the indicated custom modifiers (which can be optional or required) virtual void NotifyCustomMod(sig_elem_type cmod, sig_index_type indexType, sig_index index) = 0; // the type is a simple type, the elem_type defines it fully virtual void NotifyTypeSimple(sig_elem_type elem_type) = 0; // the type is specified by the given index of the given index type (normally a type index in the type metadata) // this callback is normally qualified by other ones such as NotifyTypeClass or NotifyTypeValueType virtual void NotifyTypeDefOrRef(sig_index_type indexType, int index) = 0; // the type is an instance of a generic // elem_type indicates value_type or class // indexType and index indicate the metadata for the type in question // number indicates the number of type specifications for the generic types that will follow virtual void NotifyTypeGenericInst(sig_elem_type elem_type, sig_index_type indexType, sig_index index, sig_mem_number number) = 0; // the type is the type of the nth generic type parameter for the class virtual void NotifyTypeGenericTypeVariable(sig_mem_number number) = 0; // the type is the type 
of the nth generic type parameter for the member virtual void NotifyTypeGenericMemberVariable(sig_mem_number number) = 0; // the type will be a value type virtual void NotifyTypeValueType() = 0; // the type will be a class virtual void NotifyTypeClass() = 0; // the type is a pointer to a type (nested type notifications follow) virtual void NotifyTypePointer() = 0; // the type is a function pointer, followed by the type of the function virtual void NotifyTypeFunctionPointer() = 0; // the type is an array, this is followed by the array shape, see above, as well as modifiers and element type virtual void NotifyTypeArray() = 0; // the type is a simple zero-based array, this has no shape but does have custom modifiers and element type virtual void NotifyTypeSzArray() = 0; }; //---------------------------------------------------- #endif // __PROFILER_SIGNATURE_PARSER__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __PROFILER_SIGNATURE_PARSER__ #define __PROFILER_SIGNATURE_PARSER__ /* Sig ::= MethodDefSig | MethodRefSig | StandAloneMethodSig | FieldSig | PropertySig | LocalVarSig MethodDefSig ::= [[HASTHIS] [EXPLICITTHIS]] (DEFAULT|VARARG|GENERIC GenParamCount) ParamCount RetType Param* MethodRefSig ::= [[HASTHIS] [EXPLICITTHIS]] VARARG ParamCount RetType Param* [SENTINEL Param+] StandAloneMethodSig ::= [[HASTHIS] [EXPLICITTHIS]] (DEFAULT|VARARG|C|STDCALL|THISCALL|FASTCALL) ParamCount RetType Param* [SENTINEL Param+] FieldSig ::= FIELD CustomMod* Type PropertySig ::= PROPERTY [HASTHIS] ParamCount CustomMod* Type Param* LocalVarSig ::= LOCAL_SIG Count (TYPEDBYREF | ([CustomMod] [Constraint])* [BYREF] Type)+ ------------- CustomMod ::= ( CMOD_OPT | CMOD_REQD ) ( TypeDefEncoded | TypeRefEncoded ) Constraint ::= #define ELEMENT_TYPE_PINNED Param ::= CustomMod* ( TYPEDBYREF | [BYREF] Type ) RetType ::= CustomMod* ( VOID | TYPEDBYREF | [BYREF] Type ) Type ::= ( BOOLEAN | CHAR | I1 | U1 | U2 | U2 | I4 | U4 | I8 | U8 | R4 | R8 | I | U | | VALUETYPE TypeDefOrRefEncoded | CLASS TypeDefOrRefEncoded | STRING | OBJECT | PTR CustomMod* VOID | PTR CustomMod* Type | FNPTR MethodDefSig | FNPTR MethodRefSig | ARRAY Type ArrayShape | SZARRAY CustomMod* Type | GENERICINST (CLASS | VALUETYPE) TypeDefOrRefEncoded GenArgCount Type* | VAR Number | MVAR Number ArrayShape ::= Rank NumSizes Size* NumLoBounds LoBound* TypeDefOrRefEncoded ::= TypeDefEncoded | TypeRefEncoded TypeDefEncoded ::= 32-bit-3-part-encoding-for-typedefs-and-typerefs TypeRefEncoded ::= 32-bit-3-part-encoding-for-typedefs-and-typerefs ParamCount ::= 29-bit-encoded-integer GenArgCount ::= 29-bit-encoded-integer Count ::= 29-bit-encoded-integer Rank ::= 29-bit-encoded-integer NumSizes ::= 29-bit-encoded-integer Size ::= 29-bit-encoded-integer NumLoBounds ::= 29-bit-encoded-integer LoBounds ::= 29-bit-encoded-integer Number ::= 29-bit-encoded-integer */ #define ELEMENT_TYPE_END 0x00 //Marks end of a list #define ELEMENT_TYPE_VOID 0x01 #define ELEMENT_TYPE_BOOLEAN 0x02 #define ELEMENT_TYPE_CHAR 0x03 #define ELEMENT_TYPE_I1 0x04 #define ELEMENT_TYPE_U1 0x05 #define ELEMENT_TYPE_I2 0x06 #define ELEMENT_TYPE_U2 0x07 #define ELEMENT_TYPE_I4 0x08 #define ELEMENT_TYPE_U4 0x09 #define ELEMENT_TYPE_I8 0x0a #define ELEMENT_TYPE_U8 0x0b #define ELEMENT_TYPE_R4 0x0c #define ELEMENT_TYPE_R8 0x0d #define ELEMENT_TYPE_STRING 0x0e #define ELEMENT_TYPE_PTR 0x0f // Followed by type #define ELEMENT_TYPE_BYREF 0x10 // Followed by type #define ELEMENT_TYPE_VALUETYPE 0x11 // Followed by TypeDef or TypeRef token #define ELEMENT_TYPE_CLASS 0x12 // Followed by TypeDef or TypeRef token #define ELEMENT_TYPE_VAR 0x13 // Generic parameter in a generic type definition, represented as number #define ELEMENT_TYPE_ARRAY 0x14 // type rank boundsCount bound1 ... loCount lo1 ... #define ELEMENT_TYPE_GENERICINST 0x15 // Generic type instantiation. Followed by type type-arg-count type-1 ... 
type-n #define ELEMENT_TYPE_TYPEDBYREF 0x16 #define ELEMENT_TYPE_I 0x18 // System.IntPtr #define ELEMENT_TYPE_U 0x19 // System.UIntPtr #define ELEMENT_TYPE_FNPTR 0x1b // Followed by full method signature #define ELEMENT_TYPE_OBJECT 0x1c // System.Object #define ELEMENT_TYPE_SZARRAY 0x1d // Single-dim array with 0 lower bound #define ELEMENT_TYPE_MVAR 0x1e // Generic parameter in a generic method definition,represented as number #define ELEMENT_TYPE_CMOD_REQD 0x1f // Required modifier : followed by a TypeDef or TypeRef token #define ELEMENT_TYPE_CMOD_OPT 0x20 // Optional modifier : followed by a TypeDef or TypeRef token #define ELEMENT_TYPE_INTERNAL 0x21 // Implemented within the CLI #define ELEMENT_TYPE_MODIFIER 0x40 // Or'd with following element types #define ELEMENT_TYPE_SENTINEL 0x41 // Sentinel for vararg method signature #define ELEMENT_TYPE_PINNED 0x45 // Denotes a local variable that points at a pinned object #define SIG_METHOD_DEFAULT 0x00 // default calling convention #define SIG_METHOD_C 0x01 // C calling convention #define SIG_METHOD_STDCALL 0x02 // Stdcall calling convention #define SIG_METHOD_THISCALL 0x03 // thiscall calling convention #define SIG_METHOD_FASTCALL 0x04 // fastcall calling convention #define SIG_METHOD_VARARG 0x05 // vararg calling convention #define SIG_FIELD 0x06 // encodes a field #define SIG_LOCAL_SIG 0x07 // used for the .locals directive #define SIG_PROPERTY 0x08 // used to encode a property #define SIG_GENERIC 0x10 // used to indicate that the method has one or more generic parameters. #define SIG_HASTHIS 0x20 // used to encode the keyword instance in the calling convention #define SIG_EXPLICITTHIS 0x40 // used to encode the keyword explicit in the calling convention #define SIG_INDEX_TYPE_TYPEDEF 0x00 // ParseTypeDefOrRefEncoded returns this as the out index type for typedefs #define SIG_INDEX_TYPE_TYPEREF 0x01 // ParseTypeDefOrRefEncoded returns this as the out index type for typerefs #define SIG_INDEX_TYPE_TYPESPEC 0x02 // ParseTypeDefOrRefEncoded returns this as the out index type for typespecs typedef unsigned char sig_byte; typedef unsigned char sig_elem_type; typedef unsigned char sig_index_type; typedef unsigned int sig_index; typedef unsigned int sig_count; typedef unsigned int sig_mem_number; class SigParser { private: sig_byte *pbBase; sig_byte *pbCur; sig_byte *pbEnd; public: bool Parse(sig_byte *blob, sig_count len); private: bool ParseByte(sig_byte *pbOut); bool ParseNumber(sig_count *pOut); bool ParseTypeDefOrRefEncoded(sig_index_type *pOutIndexType, sig_index *pOutIndex); bool ParseMethod(sig_elem_type); bool ParseField(sig_elem_type); bool ParseProperty(sig_elem_type); bool ParseLocals(sig_elem_type); bool ParseLocal(); bool ParseOptionalCustomMods(); bool ParseOptionalCustomModsOrConstraint(); bool ParseCustomMod(); bool ParseRetType(); bool ParseType(); bool ParseParam(); bool ParseArrayShape(); protected: // subtype these methods to create your parser side-effects //---------------------------------------------------- // a method with given elem_type virtual void NotifyBeginMethod(sig_elem_type elem_type) = 0; virtual void NotifyEndMethod() = 0; // the method has a this pointer virtual void NotifyHasThis() = 0; // total parameters for the method virtual void NotifyParamCount(sig_count) = 0; // starting a return type virtual void NotifyBeginRetType() = 0; virtual void NotifyEndRetType() = 0; // starting a parameter virtual void NotifyBeginParam() = 0; virtual void NotifyEndParam() = 0; // sentinel indication the location of the "..." 
in the method signature virtual void NotifySentinal() = 0; // number of generic parameters in this method signature (if any) virtual void NotifyGenericParamCount(sig_count) = 0; //---------------------------------------------------- // a field with given elem_type virtual void NotifyBeginField(sig_elem_type elem_type) = 0; virtual void NotifyEndField() = 0; //---------------------------------------------------- // a block of locals with given elem_type (always just LOCAL_SIG for now) virtual void NotifyBeginLocals(sig_elem_type elem_type) = 0; virtual void NotifyEndLocals() = 0; // count of locals with a block virtual void NotifyLocalsCount(sig_count) = 0; // starting a new local within a local block virtual void NotifyBeginLocal() = 0; virtual void NotifyEndLocal() = 0; // the only constraint available to locals at the moment is ELEMENT_TYPE_PINNED virtual void NotifyConstraint(sig_elem_type elem_type) = 0; //---------------------------------------------------- // a property with given element type virtual void NotifyBeginProperty(sig_elem_type elem_type) = 0; virtual void NotifyEndProperty() = 0; //---------------------------------------------------- // starting array shape information for array types virtual void NotifyBeginArrayShape() = 0; virtual void NotifyEndArrayShape() = 0; // array rank (total number of dimensions) virtual void NotifyRank(sig_count) = 0; // number of dimensions with specified sizes followed by the size of each virtual void NotifyNumSizes(sig_count) = 0; virtual void NotifySize(sig_count) = 0; // BUG BUG lower bounds can be negative, how can this be encoded? // number of dimensions with specified lower bounds followed by lower bound of each virtual void NotifyNumLoBounds(sig_count) = 0; virtual void NotifyLoBound(sig_count) = 0; //---------------------------------------------------- // starting a normal type (occurs in many contexts such as param, field, local, etc) virtual void NotifyBeginType() = 0; virtual void NotifyEndType() = 0; virtual void NotifyTypedByref() = 0; // the type has the 'byref' modifier on it -- this normally proceeds the type definition in the context // the type is used, so for instance a parameter might have the byref modifier on it // so this happens before the BeginType in that context virtual void NotifyByref() = 0; // the type is "VOID" (this has limited uses, function returns and void pointer) virtual void NotifyVoid() = 0; // the type has the indicated custom modifiers (which can be optional or required) virtual void NotifyCustomMod(sig_elem_type cmod, sig_index_type indexType, sig_index index) = 0; // the type is a simple type, the elem_type defines it fully virtual void NotifyTypeSimple(sig_elem_type elem_type) = 0; // the type is specified by the given index of the given index type (normally a type index in the type metadata) // this callback is normally qualified by other ones such as NotifyTypeClass or NotifyTypeValueType virtual void NotifyTypeDefOrRef(sig_index_type indexType, int index) = 0; // the type is an instance of a generic // elem_type indicates value_type or class // indexType and index indicate the metadata for the type in question // number indicates the number of type specifications for the generic types that will follow virtual void NotifyTypeGenericInst(sig_elem_type elem_type, sig_index_type indexType, sig_index index, sig_mem_number number) = 0; // the type is the type of the nth generic type parameter for the class virtual void NotifyTypeGenericTypeVariable(sig_mem_number number) = 0; // the type is the type 
of the nth generic type parameter for the member virtual void NotifyTypeGenericMemberVariable(sig_mem_number number) = 0; // the type will be a value type virtual void NotifyTypeValueType() = 0; // the type will be a class virtual void NotifyTypeClass() = 0; // the type is a pointer to a type (nested type notifications follow) virtual void NotifyTypePointer() = 0; // the type is a function pointer, followed by the type of the function virtual void NotifyTypeFunctionPointer() = 0; // the type is an array, this is followed by the array shape, see above, as well as modifiers and element type virtual void NotifyTypeArray() = 0; // the type is a simple zero-based array, this has no shape but does have custom modifiers and element type virtual void NotifyTypeSzArray() = 0; }; //---------------------------------------------------- #endif // __PROFILER_SIGNATURE_PARSER__
-1
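The "29-bit-encoded-integer" entries in the grammar above refer to ECMA-335 compressed unsigned integers, which is what a ParseNumber implementation has to decode: 1 byte when the high bit is 0, 2 bytes when the top bits are 10, 4 bytes when they are 110. A standalone sketch of that decoding, independent of the SigParser class:

#include <cstdint>
#include <cstdio>

// Decodes an ECMA-335 compressed unsigned integer.
// Returns the number of bytes consumed, or 0 on malformed/short input.
static int decode_compressed_uint(const uint8_t *p, const uint8_t *end, uint32_t *out)
{
    if (p >= end) return 0;
    if ((p[0] & 0x80) == 0) {               // 0xxxxxxx: 1 byte, 7-bit value
        *out = p[0];
        return 1;
    }
    if ((p[0] & 0xC0) == 0x80) {            // 10xxxxxx: 2 bytes, 14-bit value
        if (end - p < 2) return 0;
        *out = ((p[0] & 0x3Fu) << 8) | p[1];
        return 2;
    }
    if ((p[0] & 0xE0) == 0xC0) {            // 110xxxxx: 4 bytes, 29-bit value
        if (end - p < 4) return 0;
        *out = ((p[0] & 0x1Fu) << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
        return 4;
    }
    return 0;                               // 111xxxxx is reserved
}

int main()
{
    const uint8_t blob[] = { 0x03, 0xBF, 0xFF, 0xC0, 0x00, 0x40, 0x00 };
    const uint8_t *p = blob, *end = blob + sizeof(blob);
    uint32_t value;
    int consumed;
    while ((consumed = decode_compressed_uint(p, end, &value)) != 0) {
        std::printf("%u\n", value);         // expect 3, 16383, 16384
        p += consumed;
    }
    return 0;
}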
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/c_runtime/printf/test6/test6.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test6.c ** ** Purpose: Test #6 for the printf function. Tests the char specifier (%c). ** ** **==========================================================================*/ #include <palsuite.h> #include "../printf.h" PALTEST(c_runtime_printf_test6_paltest_printf_test6, "c_runtime/printf/test6/paltest_printf_test6") { WCHAR wc = (WCHAR) 'c'; if (PAL_Initialize(argc, argv)) { return FAIL; } DoCharTest("foo %c", 'b', "foo b"); DoCharTest("foo %hc", 'b', "foo b"); DoWCharTest("foo %lc", wc, "foo c"); DoCharTest("foo %Lc", 'b', "foo b"); DoCharTest("foo %I64c", 'b', "foo b"); DoCharTest("foo %5c", 'b', "foo b"); DoCharTest("foo %.0c", 'b', "foo b"); DoCharTest("foo %-5c", 'b', "foo b "); DoCharTest("foo %05c", 'b', "foo 0000b"); DoCharTest("foo % c", 'b', "foo b"); DoCharTest("foo %#c", 'b', "foo b"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test6.c ** ** Purpose: Test #6 for the printf function. Tests the char specifier (%c). ** ** **==========================================================================*/ #include <palsuite.h> #include "../printf.h" PALTEST(c_runtime_printf_test6_paltest_printf_test6, "c_runtime/printf/test6/paltest_printf_test6") { WCHAR wc = (WCHAR) 'c'; if (PAL_Initialize(argc, argv)) { return FAIL; } DoCharTest("foo %c", 'b', "foo b"); DoCharTest("foo %hc", 'b', "foo b"); DoWCharTest("foo %lc", wc, "foo c"); DoCharTest("foo %Lc", 'b', "foo b"); DoCharTest("foo %I64c", 'b', "foo b"); DoCharTest("foo %5c", 'b', "foo b"); DoCharTest("foo %.0c", 'b', "foo b"); DoCharTest("foo %-5c", 'b', "foo b "); DoCharTest("foo %05c", 'b', "foo 0000b"); DoCharTest("foo % c", 'b', "foo b"); DoCharTest("foo %#c", 'b', "foo b"); PAL_Terminate(); return PASS; }
-1
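DoCharTest and DoWCharTest come from ../printf.h, which is not shown here; presumably they format into a buffer and compare against the expected string, reporting through the PAL harness. A rough standalone stand-in for that helper (hypothetical, not the PAL suite's actual implementation), exercising the same %c width/justification cases:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the suite's DoCharTest helper. */
static int check_char_format(const char *format, char arg, const char *expected)
{
    char buffer[256];
    sprintf(buffer, format, arg);
    if (strcmp(buffer, expected) != 0) {
        printf("FAIL: \"%s\" with '%c' gave \"%s\", expected \"%s\"\n",
               format, arg, buffer, expected);
        return 0;
    }
    return 1;
}

int main(void)
{
    int ok = 1;
    ok &= check_char_format("foo %c", 'b', "foo b");
    ok &= check_char_format("foo %5c", 'b', "foo     b");   /* right-justified */
    ok &= check_char_format("foo %-5c", 'b', "foo b    ");  /* left-justified  */
    return ok ? 0 : 1;
}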
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/nativeaot/Runtime/CachedInterfaceDispatch.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ==--== // // Shared (non-architecture specific) portions of a mechanism to perform interface dispatch using an alternate // mechanism to VSD that does not require runtime generation of code. // // ============================================================================ #ifdef FEATURE_CACHED_INTERFACE_DISPATCH bool InitializeInterfaceDispatch(); void ReclaimUnusedInterfaceDispatchCaches(); // Interface dispatch caches contain an array of these entries. An instance of a cache is paired with a stub // that implicitly knows how many entries are contained. These entries must be aligned to twice the alignment // of a pointer due to the synchonization mechanism used to update them at runtime. struct InterfaceDispatchCacheEntry { MethodTable * m_pInstanceType; // Potential type of the object instance being dispatched on void * m_pTargetCode; // Method to dispatch to if the actual instance type matches the above }; // The interface dispatch cache itself. As well as the entries we include the cache size (since logic such as // cache miss processing needs to determine this value in a synchronized manner, so it can't be contained in // the owning interface dispatch indirection cell) and a list entry used to link the caches in one of a couple // of lists related to cache reclamation. #pragma warning(push) #pragma warning(disable:4200) // nonstandard extension used: zero-sized array in struct/union struct InterfaceDispatchCell; struct InterfaceDispatchCache { InterfaceDispatchCacheHeader m_cacheHeader; union { InterfaceDispatchCache * m_pNextFree; // next in free list #ifndef HOST_AMD64 InterfaceDispatchCell * m_pCell; // pointer back to interface dispatch cell - not used for AMD64 #endif }; uint32_t m_cEntries; InterfaceDispatchCacheEntry m_rgEntries[]; }; #pragma warning(pop) #endif // FEATURE_CACHED_INTERFACE_DISPATCH
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ==--== // // Shared (non-architecture specific) portions of a mechanism to perform interface dispatch using an alternate // mechanism to VSD that does not require runtime generation of code. // // ============================================================================ #ifdef FEATURE_CACHED_INTERFACE_DISPATCH bool InitializeInterfaceDispatch(); void ReclaimUnusedInterfaceDispatchCaches(); // Interface dispatch caches contain an array of these entries. An instance of a cache is paired with a stub // that implicitly knows how many entries are contained. These entries must be aligned to twice the alignment // of a pointer due to the synchonization mechanism used to update them at runtime. struct InterfaceDispatchCacheEntry { MethodTable * m_pInstanceType; // Potential type of the object instance being dispatched on void * m_pTargetCode; // Method to dispatch to if the actual instance type matches the above }; // The interface dispatch cache itself. As well as the entries we include the cache size (since logic such as // cache miss processing needs to determine this value in a synchronized manner, so it can't be contained in // the owning interface dispatch indirection cell) and a list entry used to link the caches in one of a couple // of lists related to cache reclamation. #pragma warning(push) #pragma warning(disable:4200) // nonstandard extension used: zero-sized array in struct/union struct InterfaceDispatchCell; struct InterfaceDispatchCache { InterfaceDispatchCacheHeader m_cacheHeader; union { InterfaceDispatchCache * m_pNextFree; // next in free list #ifndef HOST_AMD64 InterfaceDispatchCell * m_pCell; // pointer back to interface dispatch cell - not used for AMD64 #endif }; uint32_t m_cEntries; InterfaceDispatchCacheEntry m_rgEntries[]; }; #pragma warning(pop) #endif // FEATURE_CACHED_INTERFACE_DISPATCH
-1
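Because m_rgEntries above is a flexible array member (the zero-sized-array warning the #pragma suppresses), a cache for N entries is carved out of a single allocation sized with offsetof rather than sizeof. A generic C sketch of that sizing pattern, with simplified fields rather than the runtime's types or allocator:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    void *instance_type;    /* stands in for MethodTable* */
    void *target_code;
} cache_entry;

typedef struct {
    uint32_t    entry_count;
    cache_entry entries[];  /* flexible array member */
} dispatch_cache;

static dispatch_cache *cache_alloc(uint32_t entries)
{
    /* offsetof covers the header; the entry array lives in the same block. */
    size_t size = offsetof(dispatch_cache, entries) + entries * sizeof(cache_entry);
    dispatch_cache *cache = calloc(1, size);
    if (cache) cache->entry_count = entries;
    return cache;
}

int main(void)
{
    dispatch_cache *cache = cache_alloc(8);
    if (!cache) return 1;
    printf("allocated %u-entry cache\n", cache->entry_count);
    free(cache);
    return 0;
}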
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/md/inc/assemblymdinternaldisp.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // AssemblyMDInternalDispenser.h // // // Contains utility code for MD directory // //***************************************************************************** #ifndef __AssemblyMDInternalDispenser__h__ #define __AssemblyMDInternalDispenser__h__ #include "../runtime/mdinternalro.h" #endif // __AssemblyMDInternalDispenser__h__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // AssemblyMDInternalDispenser.h // // // Contains utility code for MD directory // //***************************************************************************** #ifndef __AssemblyMDInternalDispenser__h__ #define __AssemblyMDInternalDispenser__h__ #include "../runtime/mdinternalro.h" #endif // __AssemblyMDInternalDispenser__h__
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/tests/profiler/native/getappdomainstaticaddress/getappdomainstaticaddress.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "../profiler.h" #include <atomic> #include <memory> #include <set> #include <mutex> #include <vector> #include <map> #include <string> #include <thread> #include <chrono> #include <functional> #include "cor.h" #include "corprof.h" typedef HRESULT (*GetDispenserFunc) (const CLSID &pClsid, const IID &pIid, void **ppv); class GetAppDomainStaticAddress : public Profiler { public: GetAppDomainStaticAddress(); virtual ~GetAppDomainStaticAddress(); static GUID GetClsid(); virtual HRESULT STDMETHODCALLTYPE Initialize(IUnknown* pICorProfilerInfoUnk) override; virtual HRESULT STDMETHODCALLTYPE Shutdown() override; virtual HRESULT STDMETHODCALLTYPE ModuleLoadFinished(ModuleID moduleId, HRESULT hrStatus) override; virtual HRESULT STDMETHODCALLTYPE ModuleUnloadStarted(ModuleID moduleId) override; virtual HRESULT STDMETHODCALLTYPE ClassLoadFinished(ClassID classId, HRESULT hrStatus) override; virtual HRESULT STDMETHODCALLTYPE ClassUnloadStarted(ClassID classId) override; virtual HRESULT STDMETHODCALLTYPE JITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock) override; virtual HRESULT STDMETHODCALLTYPE GarbageCollectionFinished() override; private: std::atomic<int> refCount; std::atomic<ULONG32> failures; std::atomic<ULONG32> successes; std::atomic<ULONG32> collectibleCount; std::atomic<ULONG32> nonCollectibleCount; std::atomic<int> jitEventCount; std::thread gcTriggerThread; AutoEvent gcWaitEvent; typedef std::map<ClassID, AppDomainID>ClassAppDomainMap; ClassAppDomainMap classADMap; std::mutex classADMapLock; bool IsRuntimeExecutingManagedCode(); std::vector<ClassID> GetGenericTypeArgs(ClassID classId); };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "../profiler.h" #include <atomic> #include <memory> #include <set> #include <mutex> #include <vector> #include <map> #include <string> #include <thread> #include <chrono> #include <functional> #include "cor.h" #include "corprof.h" typedef HRESULT (*GetDispenserFunc) (const CLSID &pClsid, const IID &pIid, void **ppv); class GetAppDomainStaticAddress : public Profiler { public: GetAppDomainStaticAddress(); virtual ~GetAppDomainStaticAddress(); static GUID GetClsid(); virtual HRESULT STDMETHODCALLTYPE Initialize(IUnknown* pICorProfilerInfoUnk) override; virtual HRESULT STDMETHODCALLTYPE Shutdown() override; virtual HRESULT STDMETHODCALLTYPE ModuleLoadFinished(ModuleID moduleId, HRESULT hrStatus) override; virtual HRESULT STDMETHODCALLTYPE ModuleUnloadStarted(ModuleID moduleId) override; virtual HRESULT STDMETHODCALLTYPE ClassLoadFinished(ClassID classId, HRESULT hrStatus) override; virtual HRESULT STDMETHODCALLTYPE ClassUnloadStarted(ClassID classId) override; virtual HRESULT STDMETHODCALLTYPE JITCompilationFinished(FunctionID functionId, HRESULT hrStatus, BOOL fIsSafeToBlock) override; virtual HRESULT STDMETHODCALLTYPE GarbageCollectionFinished() override; private: std::atomic<int> refCount; std::atomic<ULONG32> failures; std::atomic<ULONG32> successes; std::atomic<ULONG32> collectibleCount; std::atomic<ULONG32> nonCollectibleCount; std::atomic<int> jitEventCount; std::thread gcTriggerThread; AutoEvent gcWaitEvent; typedef std::map<ClassID, AppDomainID>ClassAppDomainMap; ClassAppDomainMap classADMap; std::mutex classADMapLock; bool IsRuntimeExecutingManagedCode(); std::vector<ClassID> GetGenericTypeArgs(ClassID classId); };
-1
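The classADMap/classADMapLock pair in the profiler header above is the usual guarded-map pattern: every reader and writer takes the mutex for the duration of the access, since class load/unload callbacks can race. A self-contained sketch of that pattern with generic key/value types (not the profiler's actual callbacks):

#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

using ClassId = std::uintptr_t;
using AppDomainId = std::uintptr_t;

std::map<ClassId, AppDomainId> class_map;
std::mutex class_map_lock;

void on_class_loaded(ClassId cls, AppDomainId domain)
{
    std::lock_guard<std::mutex> guard(class_map_lock);  // held until scope exit
    class_map[cls] = domain;
}

void on_class_unloaded(ClassId cls)
{
    std::lock_guard<std::mutex> guard(class_map_lock);
    class_map.erase(cls);
}

int main()
{
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; t++)
        threads.emplace_back([t] {
            for (ClassId c = 0; c < 100; c++)
                on_class_loaded(c * 4 + t, 1);   // disjoint keys per thread
        });
    for (auto &th : threads) th.join();
    std::lock_guard<std::mutex> guard(class_map_lock);
    std::cout << class_map.size() << " classes tracked\n";
    return 0;
}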
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/jit/simdashwintrinsic.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _SIMD_AS_HWINTRINSIC_H_ #define _SIMD_AS_HWINTRINSIC_H_ enum class SimdAsHWIntrinsicClassId { Unknown, Vector2, Vector3, Vector4, VectorT128, VectorT256, }; enum class SimdAsHWIntrinsicFlag : unsigned int { None = 0, // Indicates compFloatingPointUsed does not need to be set. NoFloatingPointUsed = 0x1, // Indicates the intrinsic is for an instance method. InstanceMethod = 0x02, // Indicates the operands should be swapped in importation. NeedsOperandsSwapped = 0x04, // Base type should come from the this argument BaseTypeFromThisArg = 0x08, // For SIMDVectorHandle, keep the base type from the result type KeepBaseTypeFromRet = 0x10, }; inline SimdAsHWIntrinsicFlag operator~(SimdAsHWIntrinsicFlag value) { return static_cast<SimdAsHWIntrinsicFlag>(~static_cast<unsigned int>(value)); } inline SimdAsHWIntrinsicFlag operator|(SimdAsHWIntrinsicFlag lhs, SimdAsHWIntrinsicFlag rhs) { return static_cast<SimdAsHWIntrinsicFlag>(static_cast<unsigned int>(lhs) | static_cast<unsigned int>(rhs)); } inline SimdAsHWIntrinsicFlag operator&(SimdAsHWIntrinsicFlag lhs, SimdAsHWIntrinsicFlag rhs) { return static_cast<SimdAsHWIntrinsicFlag>(static_cast<unsigned int>(lhs) & static_cast<unsigned int>(rhs)); } inline SimdAsHWIntrinsicFlag operator^(SimdAsHWIntrinsicFlag lhs, SimdAsHWIntrinsicFlag rhs) { return static_cast<SimdAsHWIntrinsicFlag>(static_cast<unsigned int>(lhs) ^ static_cast<unsigned int>(rhs)); } struct SimdAsHWIntrinsicInfo { NamedIntrinsic id; const char* name; SimdAsHWIntrinsicClassId classId; int numArgs; NamedIntrinsic hwIntrinsic[10]; SimdAsHWIntrinsicFlag flags; static const SimdAsHWIntrinsicInfo& lookup(NamedIntrinsic id); static NamedIntrinsic lookupId(CORINFO_SIG_INFO* sig, const char* className, const char* methodName, const char* enclosingClassName, int sizeOfVectorT); static SimdAsHWIntrinsicClassId lookupClassId(const char* className, const char* enclosingClassName, int sizeOfVectorT); // Member lookup static NamedIntrinsic lookupId(NamedIntrinsic id) { return lookup(id).id; } static const char* lookupName(NamedIntrinsic id) { return lookup(id).name; } static SimdAsHWIntrinsicClassId lookupClassId(NamedIntrinsic id) { return lookup(id).classId; } static int lookupNumArgs(NamedIntrinsic id) { return lookup(id).numArgs; } static NamedIntrinsic lookupHWIntrinsic(NamedIntrinsic id, var_types type) { if ((type < TYP_BYTE) || (type > TYP_DOUBLE)) { assert(!"Unexpected type"); return NI_Illegal; } return lookup(id).hwIntrinsic[type - TYP_BYTE]; } static SimdAsHWIntrinsicFlag lookupFlags(NamedIntrinsic id) { return lookup(id).flags; } // Flags lookup static bool IsFloatingPointUsed(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::NoFloatingPointUsed) == SimdAsHWIntrinsicFlag::None; } static bool IsInstanceMethod(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::InstanceMethod) == SimdAsHWIntrinsicFlag::InstanceMethod; } static bool NeedsOperandsSwapped(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::NeedsOperandsSwapped) == SimdAsHWIntrinsicFlag::NeedsOperandsSwapped; } static bool BaseTypeFromThisArg(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::BaseTypeFromThisArg) == SimdAsHWIntrinsicFlag::BaseTypeFromThisArg; } static 
bool KeepBaseTypeFromRet(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::KeepBaseTypeFromRet) == SimdAsHWIntrinsicFlag::KeepBaseTypeFromRet; } }; #endif // _SIMD_AS_HWINTRINSIC_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _SIMD_AS_HWINTRINSIC_H_ #define _SIMD_AS_HWINTRINSIC_H_ enum class SimdAsHWIntrinsicClassId { Unknown, Vector2, Vector3, Vector4, VectorT128, VectorT256, }; enum class SimdAsHWIntrinsicFlag : unsigned int { None = 0, // Indicates compFloatingPointUsed does not need to be set. NoFloatingPointUsed = 0x1, // Indicates the intrinsic is for an instance method. InstanceMethod = 0x02, // Indicates the operands should be swapped in importation. NeedsOperandsSwapped = 0x04, // Base type should come from the this argument BaseTypeFromThisArg = 0x08, // For SIMDVectorHandle, keep the base type from the result type KeepBaseTypeFromRet = 0x10, }; inline SimdAsHWIntrinsicFlag operator~(SimdAsHWIntrinsicFlag value) { return static_cast<SimdAsHWIntrinsicFlag>(~static_cast<unsigned int>(value)); } inline SimdAsHWIntrinsicFlag operator|(SimdAsHWIntrinsicFlag lhs, SimdAsHWIntrinsicFlag rhs) { return static_cast<SimdAsHWIntrinsicFlag>(static_cast<unsigned int>(lhs) | static_cast<unsigned int>(rhs)); } inline SimdAsHWIntrinsicFlag operator&(SimdAsHWIntrinsicFlag lhs, SimdAsHWIntrinsicFlag rhs) { return static_cast<SimdAsHWIntrinsicFlag>(static_cast<unsigned int>(lhs) & static_cast<unsigned int>(rhs)); } inline SimdAsHWIntrinsicFlag operator^(SimdAsHWIntrinsicFlag lhs, SimdAsHWIntrinsicFlag rhs) { return static_cast<SimdAsHWIntrinsicFlag>(static_cast<unsigned int>(lhs) ^ static_cast<unsigned int>(rhs)); } struct SimdAsHWIntrinsicInfo { NamedIntrinsic id; const char* name; SimdAsHWIntrinsicClassId classId; int numArgs; NamedIntrinsic hwIntrinsic[10]; SimdAsHWIntrinsicFlag flags; static const SimdAsHWIntrinsicInfo& lookup(NamedIntrinsic id); static NamedIntrinsic lookupId(CORINFO_SIG_INFO* sig, const char* className, const char* methodName, const char* enclosingClassName, int sizeOfVectorT); static SimdAsHWIntrinsicClassId lookupClassId(const char* className, const char* enclosingClassName, int sizeOfVectorT); // Member lookup static NamedIntrinsic lookupId(NamedIntrinsic id) { return lookup(id).id; } static const char* lookupName(NamedIntrinsic id) { return lookup(id).name; } static SimdAsHWIntrinsicClassId lookupClassId(NamedIntrinsic id) { return lookup(id).classId; } static int lookupNumArgs(NamedIntrinsic id) { return lookup(id).numArgs; } static NamedIntrinsic lookupHWIntrinsic(NamedIntrinsic id, var_types type) { if ((type < TYP_BYTE) || (type > TYP_DOUBLE)) { assert(!"Unexpected type"); return NI_Illegal; } return lookup(id).hwIntrinsic[type - TYP_BYTE]; } static SimdAsHWIntrinsicFlag lookupFlags(NamedIntrinsic id) { return lookup(id).flags; } // Flags lookup static bool IsFloatingPointUsed(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::NoFloatingPointUsed) == SimdAsHWIntrinsicFlag::None; } static bool IsInstanceMethod(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::InstanceMethod) == SimdAsHWIntrinsicFlag::InstanceMethod; } static bool NeedsOperandsSwapped(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::NeedsOperandsSwapped) == SimdAsHWIntrinsicFlag::NeedsOperandsSwapped; } static bool BaseTypeFromThisArg(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::BaseTypeFromThisArg) == SimdAsHWIntrinsicFlag::BaseTypeFromThisArg; } static 
bool KeepBaseTypeFromRet(NamedIntrinsic id) { SimdAsHWIntrinsicFlag flags = lookupFlags(id); return (flags & SimdAsHWIntrinsicFlag::KeepBaseTypeFromRet) == SimdAsHWIntrinsicFlag::KeepBaseTypeFromRet; } }; #endif // _SIMD_AS_HWINTRINSIC_H_
-1
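The SimdAsHWIntrinsicFlag operators above work by casting through the underlying unsigned type and back, so enum class values can be combined and masked without giving up type safety. A minimal standalone version of the same idiom, including the mask-then-compare test pattern the header's Is*/Needs* helpers use:

#include <cstdio>

enum class Flag : unsigned int {
    None           = 0,
    InstanceMethod = 0x02,
    SwapOperands   = 0x04,
};

constexpr Flag operator|(Flag lhs, Flag rhs)
{
    return static_cast<Flag>(static_cast<unsigned>(lhs) | static_cast<unsigned>(rhs));
}

constexpr Flag operator&(Flag lhs, Flag rhs)
{
    return static_cast<Flag>(static_cast<unsigned>(lhs) & static_cast<unsigned>(rhs));
}

// The test pattern used above: mask, then compare against the flag itself.
constexpr bool has_flag(Flag value, Flag flag) { return (value & flag) == flag; }

int main()
{
    Flag flags = Flag::InstanceMethod | Flag::SwapOperands;
    std::printf("instance method: %d\n", has_flag(flags, Flag::InstanceMethod));
    std::printf("swap operands:   %d\n", has_flag(flags, Flag::SwapOperands));
    return 0;
}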
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/threading/ResetEvent/test2/test2.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Source: test2.c ** ** Dependencies: PAL_Initialize ** PAL_Terminate ** CreateEvent ** CloseHandle ** WaitForSingleObject ** ** Purpose: Test to ensure proper operation of the ResetEvent() ** API by calling it on an event handle that's already ** unsignalled. ** ** **===========================================================================*/ #include <palsuite.h> PALTEST(threading_ResetEvent_test2_paltest_resetevent_test2, "threading/ResetEvent/test2/paltest_resetevent_test2") { /* local variables */ DWORD dwRet = 0; HANDLE hEvent = NULL; LPSECURITY_ATTRIBUTES lpEventAttributes = NULL; BOOL bManualReset = TRUE; BOOL bInitialState = FALSE; /* PAL initialization */ if( (PAL_Initialize(argc, argv)) != 0 ) { return( FAIL ); } /* create an unsignalled event which we can use with ResetEvent */ hEvent = CreateEvent( lpEventAttributes, bManualReset, bInitialState, NULL ); if( hEvent == INVALID_HANDLE_VALUE ) { /* ERROR */ Fail( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() ); } /* verify that the event isn't signalled yet */ dwRet = WaitForSingleObject( hEvent, 0 ); if( dwRet != WAIT_TIMEOUT ) { /* ERROR */ Trace( "ERROR:WaitForSingleObject() call returned %lu, " "expected WAIT_TIMEOUT\n", dwRet ); CloseHandle( hEvent ); Fail( "Test failed\n" ); } /* try to reset the event */ if( ! ResetEvent( hEvent ) ) { /* ERROR */ Trace( "FAIL:%lu:ResetEvent() call failed\n", GetLastError() ); CloseHandle( hEvent ); Fail( "Test failed\n" ); } /* close the event handle */ if( ! CloseHandle( hEvent ) ) { Fail( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() ); } /* PAL termination */ PAL_Terminate(); /* return success */ return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================= ** ** Source: test2.c ** ** Dependencies: PAL_Initialize ** PAL_Terminate ** CreateEvent ** CloseHandle ** WaitForSingleObject ** ** Purpose: Test to ensure proper operation of the ResetEvent() ** API by calling it on an event handle that's already ** unsignalled. ** ** **===========================================================================*/ #include <palsuite.h> PALTEST(threading_ResetEvent_test2_paltest_resetevent_test2, "threading/ResetEvent/test2/paltest_resetevent_test2") { /* local variables */ DWORD dwRet = 0; HANDLE hEvent = NULL; LPSECURITY_ATTRIBUTES lpEventAttributes = NULL; BOOL bManualReset = TRUE; BOOL bInitialState = FALSE; /* PAL initialization */ if( (PAL_Initialize(argc, argv)) != 0 ) { return( FAIL ); } /* create an unsignalled event which we can use with ResetEvent */ hEvent = CreateEvent( lpEventAttributes, bManualReset, bInitialState, NULL ); if( hEvent == INVALID_HANDLE_VALUE ) { /* ERROR */ Fail( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() ); } /* verify that the event isn't signalled yet */ dwRet = WaitForSingleObject( hEvent, 0 ); if( dwRet != WAIT_TIMEOUT ) { /* ERROR */ Trace( "ERROR:WaitForSingleObject() call returned %lu, " "expected WAIT_TIMEOUT\n", dwRet ); CloseHandle( hEvent ); Fail( "Test failed\n" ); } /* try to reset the event */ if( ! ResetEvent( hEvent ) ) { /* ERROR */ Trace( "FAIL:%lu:ResetEvent() call failed\n", GetLastError() ); CloseHandle( hEvent ); Fail( "Test failed\n" ); } /* close the event handle */ if( ! CloseHandle( hEvent ) ) { Fail( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() ); } /* PAL termination */ PAL_Terminate(); /* return success */ return PASS; }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/jit/valuenumtype.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Defines the type "ValueNum". // This file exists only to break an include file cycle -- had been in ValueNum.h. But that // file wanted to include gentree.h to get GT_COUNT, and gentree.h wanted to include ValueNum.h to // get the ValueNum type. /*****************************************************************************/ #ifndef _VALUENUMTYPE_H_ #define _VALUENUMTYPE_H_ /*****************************************************************************/ // We will represent ValueNum's as unsigned integers. typedef UINT32 ValueNum; // There are two "kinds" of value numbers, which differ in their modeling of the actions of other threads. // "Liberal" value numbers assume that the other threads change contents of memory locations only at // synchronization points. Liberal VNs are appropriate, for example, in identifying CSE opportunities. // "Conservative" value numbers assume that the contents of memory locations change arbitrarily between // every two accesses. Conservative VNs are appropriate, for example, in assertion prop, where an observation // of a property of the value in some storage location is used to perform an optimization downstream on // an operation involving the contents of that storage location. If other threads may modify the storage // location between the two accesses, the observed property may no longer hold -- and conservative VNs make // it clear that the values need not be the same. // enum ValueNumKind { VNK_Liberal, VNK_Conservative }; struct ValueNumPair { private: ValueNum m_liberal; ValueNum m_conservative; public: ValueNum GetLiberal() const { return m_liberal; } void SetLiberal(ValueNum vn) { m_liberal = vn; } ValueNum GetConservative() const { return m_conservative; } void SetConservative(ValueNum vn) { m_conservative = vn; } ValueNum* GetLiberalAddr() { return &m_liberal; } ValueNum* GetConservativeAddr() { return &m_conservative; } ValueNum Get(ValueNumKind vnk) { if (vnk == VNK_Liberal) { return m_liberal; } else { assert(vnk == VNK_Conservative); return m_conservative; } } void Set(ValueNumKind vnk, ValueNum vn) { if (vnk == VNK_Liberal) { SetLiberal(vn); } else { assert(vnk == VNK_Conservative); SetConservative(vn); } } void SetBoth(ValueNum vn) { m_liberal = vn; m_conservative = vn; } bool operator==(const ValueNumPair& other) const { return (m_liberal == other.m_liberal) && (m_conservative == other.m_conservative); } bool operator!=(const ValueNumPair& other) const { return !(*this == other); } void operator=(const ValueNumPair& vn2) { m_liberal = vn2.m_liberal; m_conservative = vn2.m_conservative; } // Initializes both elements to "NoVN". Defined in ValueNum.cpp. ValueNumPair(); ValueNumPair(ValueNum lib, ValueNum cons) : m_liberal(lib), m_conservative(cons) { } // True iff neither element is "NoVN". Defined in ValueNum.cpp. bool BothDefined() const; bool BothEqual() const { return m_liberal == m_conservative; } }; #endif // _VALUENUMTYPE_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Defines the type "ValueNum". // This file exists only to break an include file cycle -- had been in ValueNum.h. But that // file wanted to include gentree.h to get GT_COUNT, and gentree.h wanted to include ValueNum.h to // get the ValueNum type. /*****************************************************************************/ #ifndef _VALUENUMTYPE_H_ #define _VALUENUMTYPE_H_ /*****************************************************************************/ // We will represent ValueNum's as unsigned integers. typedef UINT32 ValueNum; // There are two "kinds" of value numbers, which differ in their modeling of the actions of other threads. // "Liberal" value numbers assume that the other threads change contents of memory locations only at // synchronization points. Liberal VNs are appropriate, for example, in identifying CSE opportunities. // "Conservative" value numbers assume that the contents of memory locations change arbitrarily between // every two accesses. Conservative VNs are appropriate, for example, in assertion prop, where an observation // of a property of the value in some storage location is used to perform an optimization downstream on // an operation involving the contents of that storage location. If other threads may modify the storage // location between the two accesses, the observed property may no longer hold -- and conservative VNs make // it clear that the values need not be the same. // enum ValueNumKind { VNK_Liberal, VNK_Conservative }; struct ValueNumPair { private: ValueNum m_liberal; ValueNum m_conservative; public: ValueNum GetLiberal() const { return m_liberal; } void SetLiberal(ValueNum vn) { m_liberal = vn; } ValueNum GetConservative() const { return m_conservative; } void SetConservative(ValueNum vn) { m_conservative = vn; } ValueNum* GetLiberalAddr() { return &m_liberal; } ValueNum* GetConservativeAddr() { return &m_conservative; } ValueNum Get(ValueNumKind vnk) { if (vnk == VNK_Liberal) { return m_liberal; } else { assert(vnk == VNK_Conservative); return m_conservative; } } void Set(ValueNumKind vnk, ValueNum vn) { if (vnk == VNK_Liberal) { SetLiberal(vn); } else { assert(vnk == VNK_Conservative); SetConservative(vn); } } void SetBoth(ValueNum vn) { m_liberal = vn; m_conservative = vn; } bool operator==(const ValueNumPair& other) const { return (m_liberal == other.m_liberal) && (m_conservative == other.m_conservative); } bool operator!=(const ValueNumPair& other) const { return !(*this == other); } void operator=(const ValueNumPair& vn2) { m_liberal = vn2.m_liberal; m_conservative = vn2.m_conservative; } // Initializes both elements to "NoVN". Defined in ValueNum.cpp. ValueNumPair(); ValueNumPair(ValueNum lib, ValueNum cons) : m_liberal(lib), m_conservative(cons) { } // True iff neither element is "NoVN". Defined in ValueNum.cpp. bool BothDefined() const; bool BothEqual() const { return m_liberal == m_conservative; } }; #endif // _VALUENUMTYPE_H_
-1
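To make the liberal/conservative distinction in valuenumtype.h concrete: redundancy elimination such as CSE may key off the liberal numbers, while an optimization that must survive arbitrary cross-thread writes should consult the conservative ones. A hypothetical consumer of ValueNumPair (the function name and flag are invented for illustration; it assumes the definitions above plus an assert macro are in scope):

    // Decide whether two values may be assumed equal for an optimization.
    bool CanTreatAsSameValue(ValueNumPair a, ValueNumPair b,
                             bool otherThreadsMayWrite)
    {
        // Under possible cross-thread interference, only agreement of the
        // conservative numbers justifies treating the values as equal.
        ValueNumKind vnk = otherThreadsMayWrite ? VNK_Conservative : VNK_Liberal;
        return a.Get(vnk) == b.Get(vnk);
    }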
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/threading/CreateSemaphoreW_ReleaseSemaphore/test1/CreateSemaphore.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: CreateSemaphoreW_ReleaseSemaphore/test1/CreateSemaphore.c ** ** Purpose: Test Semaphore operation using classic IPC problem: ** "Producer-Consumer Problem". ** ** Dependencies: CreateThread ** ReleaseSemaphore ** WaitForSingleObject ** Sleep ** fflush ** ** **=========================================================*/ #define UNICODE #include <palsuite.h> #define PRODUCTION_TOTAL 26 #define _BUF_SIZE 10 DWORD dwThreadId; /* consumer thread identifier */ HANDLE hThread; /* handle to consumer thread */ HANDLE hSemaphoreM; /* handle to mutual exclusion semaphore */ HANDLE hSemaphoreE; /* handle to semaphore that counts empty buffer slots */ HANDLE hSemaphoreF; /* handle to semaphore that counts full buffer slots */ typedef struct Buffer { short readIndex; short writeIndex; CHAR message[_BUF_SIZE]; } BufferStructure; CHAR producerItems[PRODUCTION_TOTAL + 1]; CHAR consumerItems[PRODUCTION_TOTAL + 1]; /* * Read next message from the Buffer into provided pointer. * Returns: 0 on failure, 1 on success. */ int readBuf(BufferStructure *Buffer, char *c) { if( Buffer -> writeIndex == Buffer -> readIndex ) { return 0; } *c = Buffer -> message[Buffer -> readIndex++]; Buffer -> readIndex %= _BUF_SIZE; return 1; } /* * Write message generated by the producer to Buffer. * Returns: 0 on failure, 1 on success. */ int writeBuf(BufferStructure *Buffer, CHAR c) { if( ( ((Buffer -> writeIndex) + 1) % _BUF_SIZE) == (Buffer -> readIndex) ) { return 0; } Buffer -> message[Buffer -> writeIndex++] = c; Buffer -> writeIndex %= _BUF_SIZE; return 1; } /* * Atomic decrement of semaphore value. */ VOID down(HANDLE hSemaphore) { switch ( (WaitForSingleObject ( hSemaphore, 10000))) /* Wait 10 seconds */ { case WAIT_OBJECT_0: /* * Semaphore was signaled. OK to access * semaphore. */ break; case WAIT_ABANDONED: /* * Object was mutex object whose owning * thread has terminated. Shouldn't occur. */ Fail("WaitForSingleObject call returned 'WAIT_ABANDONED'.\n" "Failing Test.\n"); break; case WAIT_FAILED: /* WaitForSingleObject function failed */ Fail("WaitForSingleObject call returned 'WAIT_FAILED'.\n" "GetLastError returned %d\nFailing Test.\n",GetLastError()); break; default: Fail("WaitForSingleObject call returned an unexpected value.\n" "Failing Test.\n"); break; } } /* * Atomic increment of semaphore value. */ VOID up(HANDLE hSemaphore) { if (!ReleaseSemaphore ( hSemaphore, 1, NULL) ) { Fail("ReleaseSemaphore call failed. GetLastError returned %d\n", GetLastError()); } } /* * Sleep 500 milliseconds. */ VOID consumerSleep(VOID) { Sleep(500); } /* * Sleep 10 milliseconds. */ VOID producerSleep(VOID) { Sleep(10); } /* * Produce a message and write the message to Buffer. */ VOID producer(BufferStructure *Buffer) { int n = 0; char c; while (n < PRODUCTION_TOTAL) { c = 'A' + n ; /* Produce Item */ down(hSemaphoreE); down(hSemaphoreM); if (writeBuf(Buffer, c)) { Trace("Producer produces %c.\n", c); fflush(stdout); producerItems[n++] = c; } up(hSemaphoreM); up(hSemaphoreF); producerSleep(); } return; } /* * Read and "Consume" the messages in Buffer. 
*/ DWORD PALAPI consumer( LPVOID lpParam ) { int n = 0; char c; consumerSleep(); while (n < PRODUCTION_TOTAL) { down(hSemaphoreF); down(hSemaphoreM); if (readBuf((BufferStructure*)lpParam, &c)) { Trace("\tConsumer consumes %c.\n", c); fflush(stdout); consumerItems[n++] = c; } up(hSemaphoreM); up(hSemaphoreE); consumerSleep(); } return 0; } PALTEST(threading_CreateSemaphoreW_ReleaseSemaphore_test1_paltest_createsemaphorew_releasesemaphore_test1, "threading/CreateSemaphoreW_ReleaseSemaphore/test1/paltest_createsemaphorew_releasesemaphore_test1") { BufferStructure Buffer, *pBuffer; pBuffer = &Buffer; if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } /* * Create Semaphores */ hSemaphoreM = CreateSemaphoreExW ( NULL, 1, 1, NULL, 0, 0); if ( NULL == hSemaphoreM ) { Fail ( "hSemaphoreM = CreateSemaphoreExW () - returned NULL\n" "Failing Test.\nGetLastError returned %d\n", GetLastError()); } hSemaphoreE = CreateSemaphoreExW ( NULL, _BUF_SIZE , _BUF_SIZE , NULL, 0, 0); if ( NULL == hSemaphoreE ) { Fail ( "hSemaphoreE = CreateSemaphoreExW () - returned NULL\n" "Failing Test.\nGetLastError returned %d\n", GetLastError()); } hSemaphoreF = CreateSemaphoreExW ( NULL, 0, _BUF_SIZE , NULL, 0, 0); if ( NULL == hSemaphoreF ) { Fail ( "hSemaphoreF = CreateSemaphoreExW () - returned NULL\n" "Failing Test.\nGetLastError returned %d\n", GetLastError()); } /* * Initialize Buffer */ pBuffer->writeIndex = pBuffer->readIndex = 0; /* * Create Consumer */ hThread = CreateThread( NULL, 0, consumer, &Buffer, 0, &dwThreadId); if ( NULL == hThread ) { Fail ( "CreateThread() returned NULL. Failing test.\n" "GetLastError returned %d\n", GetLastError()); } /* * Start producing */ producer(pBuffer); /* * Wait for consumer to complete */ WaitForSingleObject (hThread, INFINITE); /* * Compare items produced vs. items consumed */ if ( 0 != strncmp (producerItems, consumerItems, PRODUCTION_TOTAL) ) { Fail("The producerItems string %s\n and the consumerItems string " "%s\ndo not match. This could be a problem with the strncmp()" " function\n FailingTest\nGetLastError() returned %d\n", producerItems, consumerItems, GetLastError()); } Trace ("producerItems and consumerItems arrays match. All %d\nitems " "were produced and consumed in order.\nTest passed.\n", PRODUCTION_TOTAL); PAL_Terminate(); return ( PASS ); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: CreateSemaphoreW_ReleaseSemaphore/test1/CreateSemaphore.c ** ** Purpose: Test Semaphore operation using classic IPC problem: ** "Producer-Consumer Problem". ** ** Dependencies: CreateThread ** ReleaseSemaphore ** WaitForSingleObject ** Sleep ** fflush ** ** **=========================================================*/ #define UNICODE #include <palsuite.h> #define PRODUCTION_TOTAL 26 #define _BUF_SIZE 10 DWORD dwThreadId; /* consumer thread identifier */ HANDLE hThread; /* handle to consumer thread */ HANDLE hSemaphoreM; /* handle to mutual exclusion semaphore */ HANDLE hSemaphoreE; /* handle to semaphore that counts empty buffer slots */ HANDLE hSemaphoreF; /* handle to semaphore that counts full buffer slots */ typedef struct Buffer { short readIndex; short writeIndex; CHAR message[_BUF_SIZE]; } BufferStructure; CHAR producerItems[PRODUCTION_TOTAL + 1]; CHAR consumerItems[PRODUCTION_TOTAL + 1]; /* * Read next message from the Buffer into provided pointer. * Returns: 0 on failure, 1 on success. */ int readBuf(BufferStructure *Buffer, char *c) { if( Buffer -> writeIndex == Buffer -> readIndex ) { return 0; } *c = Buffer -> message[Buffer -> readIndex++]; Buffer -> readIndex %= _BUF_SIZE; return 1; } /* * Write message generated by the producer to Buffer. * Returns: 0 on failure, 1 on success. */ int writeBuf(BufferStructure *Buffer, CHAR c) { if( ( ((Buffer -> writeIndex) + 1) % _BUF_SIZE) == (Buffer -> readIndex) ) { return 0; } Buffer -> message[Buffer -> writeIndex++] = c; Buffer -> writeIndex %= _BUF_SIZE; return 1; } /* * Atomic decrement of semaphore value. */ VOID down(HANDLE hSemaphore) { switch ( (WaitForSingleObject ( hSemaphore, 10000))) /* Wait 10 seconds */ { case WAIT_OBJECT_0: /* * Semaphore was signaled. OK to access * semaphore. */ break; case WAIT_ABANDONED: /* * Object was mutex object whose owning * thread has terminated. Shouldn't occur. */ Fail("WaitForSingleObject call returned 'WAIT_ABANDONED'.\n" "Failing Test.\n"); break; case WAIT_FAILED: /* WaitForSingleObject function failed */ Fail("WaitForSingleObject call returned 'WAIT_FAILED'.\n" "GetLastError returned %d\nFailing Test.\n",GetLastError()); break; default: Fail("WaitForSingleObject call returned an unexpected value.\n" "Failing Test.\n"); break; } } /* * Atomic increment of semaphore value. */ VOID up(HANDLE hSemaphore) { if (!ReleaseSemaphore ( hSemaphore, 1, NULL) ) { Fail("ReleaseSemaphore call failed. GetLastError returned %d\n", GetLastError()); } } /* * Sleep 500 milliseconds. */ VOID consumerSleep(VOID) { Sleep(500); } /* * Sleep 10 milliseconds. */ VOID producerSleep(VOID) { Sleep(10); } /* * Produce a message and write the message to Buffer. */ VOID producer(BufferStructure *Buffer) { int n = 0; char c; while (n < PRODUCTION_TOTAL) { c = 'A' + n ; /* Produce Item */ down(hSemaphoreE); down(hSemaphoreM); if (writeBuf(Buffer, c)) { Trace("Producer produces %c.\n", c); fflush(stdout); producerItems[n++] = c; } up(hSemaphoreM); up(hSemaphoreF); producerSleep(); } return; } /* * Read and "Consume" the messages in Buffer. 
*/ DWORD PALAPI consumer( LPVOID lpParam ) { int n = 0; char c; consumerSleep(); while (n < PRODUCTION_TOTAL) { down(hSemaphoreF); down(hSemaphoreM); if (readBuf((BufferStructure*)lpParam, &c)) { Trace("\tConsumer consumes %c.\n", c); fflush(stdout); consumerItems[n++] = c; } up(hSemaphoreM); up(hSemaphoreE); consumerSleep(); } return 0; } PALTEST(threading_CreateSemaphoreW_ReleaseSemaphore_test1_paltest_createsemaphorew_releasesemaphore_test1, "threading/CreateSemaphoreW_ReleaseSemaphore/test1/paltest_createsemaphorew_releasesemaphore_test1") { BufferStructure Buffer, *pBuffer; pBuffer = &Buffer; if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } /* * Create Semaphores */ hSemaphoreM = CreateSemaphoreExW ( NULL, 1, 1, NULL, 0, 0); if ( NULL == hSemaphoreM ) { Fail ( "hSemaphoreM = CreateSemaphoreExW () - returned NULL\n" "Failing Test.\nGetLastError returned %d\n", GetLastError()); } hSemaphoreE = CreateSemaphoreExW ( NULL, _BUF_SIZE , _BUF_SIZE , NULL, 0, 0); if ( NULL == hSemaphoreE ) { Fail ( "hSemaphoreE = CreateSemaphoreExW () - returned NULL\n" "Failing Test.\nGetLastError returned %d\n", GetLastError()); } hSemaphoreF = CreateSemaphoreExW ( NULL, 0, _BUF_SIZE , NULL, 0, 0); if ( NULL == hSemaphoreF ) { Fail ( "hSemaphoreF = CreateSemaphoreExW () - returned NULL\n" "Failing Test.\nGetLastError returned %d\n", GetLastError()); } /* * Initialize Buffer */ pBuffer->writeIndex = pBuffer->readIndex = 0; /* * Create Consumer */ hThread = CreateThread( NULL, 0, consumer, &Buffer, 0, &dwThreadId); if ( NULL == hThread ) { Fail ( "CreateThread() returned NULL. Failing test.\n" "GetLastError returned %d\n", GetLastError()); } /* * Start producing */ producer(pBuffer); /* * Wait for consumer to complete */ WaitForSingleObject (hThread, INFINITE); /* * Compare items produced vs. items consumed */ if ( 0 != strncmp (producerItems, consumerItems, PRODUCTION_TOTAL) ) { Fail("The producerItems string %s\n and the consumerItems string " "%s\ndo not match. This could be a problem with the strncmp()" " function\n FailingTest\nGetLastError() returned %d\n", producerItems, consumerItems, GetLastError()); } Trace ("producerItems and consumerItems arrays match. All %d\nitems " "were produced and consumed in order.\nTest passed.\n", PRODUCTION_TOTAL); PAL_Terminate(); return ( PASS ); }
-1
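The test record above is the textbook three-semaphore bounded buffer: one counting semaphore tracks empty slots, one tracks full slots, and a binary semaphore guards the buffer indices, giving the classic down(empty); down(mutex); ... up(mutex); up(full) producer discipline and its mirror image in the consumer. The same structure in portable C++20 with std::counting_semaphore, as a sketch rather than the test's actual PAL mechanism:

    #include <semaphore>
    #include <thread>
    #include <cstdio>

    constexpr int kBufSize = 10;
    constexpr int kTotal = 26;

    char buffer[kBufSize];
    int readIndex = 0, writeIndex = 0;

    std::counting_semaphore<kBufSize> emptySlots(kBufSize); // free slots
    std::counting_semaphore<kBufSize> fullSlots(0);         // occupied slots
    std::binary_semaphore bufMutex(1);                      // index guard

    int main()
    {
        std::thread consumer([] {
            for (int n = 0; n < kTotal; ++n) {
                fullSlots.acquire();              // wait for an item
                bufMutex.acquire();
                char c = buffer[readIndex];
                readIndex = (readIndex + 1) % kBufSize;
                bufMutex.release();
                emptySlots.release();             // slot is free again
                std::printf("\tconsumed %c\n", c);
            }
        });
        for (int n = 0; n < kTotal; ++n) {
            emptySlots.acquire();                 // wait for a free slot
            bufMutex.acquire();
            buffer[writeIndex] = static_cast<char>('A' + n);
            writeIndex = (writeIndex + 1) % kBufSize;
            bufMutex.release();
            fullSlots.release();                  // item is available
        }
        consumer.join();
        return 0;
    }

Built with a C++20 compiler (e.g. g++ -std=c++20 -pthread), this produces and consumes 'A'..'Z' in order, the same property the PAL test verifies with strncmp.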
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/jit/targetamd64.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(TARGET_AMD64) #include "target.h" const char* Target::g_tgtCPUName = "x64"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L; // clang-format off #ifdef UNIX_AMD64_ABI const regNumber intArgRegs [] = { REG_EDI, REG_ESI, REG_EDX, REG_ECX, REG_R8, REG_R9 }; const regMaskTP intArgMasks[] = { RBM_EDI, RBM_ESI, RBM_EDX, RBM_ECX, RBM_R8, RBM_R9 }; const regNumber fltArgRegs [] = { REG_XMM0, REG_XMM1, REG_XMM2, REG_XMM3, REG_XMM4, REG_XMM5, REG_XMM6, REG_XMM7 }; const regMaskTP fltArgMasks[] = { RBM_XMM0, RBM_XMM1, RBM_XMM2, RBM_XMM3, RBM_XMM4, RBM_XMM5, RBM_XMM6, RBM_XMM7 }; #else // !UNIX_AMD64_ABI const regNumber intArgRegs [] = { REG_ECX, REG_EDX, REG_R8, REG_R9 }; const regMaskTP intArgMasks[] = { RBM_ECX, RBM_EDX, RBM_R8, RBM_R9 }; const regNumber fltArgRegs [] = { REG_XMM0, REG_XMM1, REG_XMM2, REG_XMM3 }; const regMaskTP fltArgMasks[] = { RBM_XMM0, RBM_XMM1, RBM_XMM2, RBM_XMM3 }; #endif // !UNIX_AMD64_ABI // clang-format on #endif // TARGET_AMD64
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*****************************************************************************/ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #if defined(TARGET_AMD64) #include "target.h" const char* Target::g_tgtCPUName = "x64"; const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L; const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L; // clang-format off #ifdef UNIX_AMD64_ABI const regNumber intArgRegs [] = { REG_EDI, REG_ESI, REG_EDX, REG_ECX, REG_R8, REG_R9 }; const regMaskTP intArgMasks[] = { RBM_EDI, RBM_ESI, RBM_EDX, RBM_ECX, RBM_R8, RBM_R9 }; const regNumber fltArgRegs [] = { REG_XMM0, REG_XMM1, REG_XMM2, REG_XMM3, REG_XMM4, REG_XMM5, REG_XMM6, REG_XMM7 }; const regMaskTP fltArgMasks[] = { RBM_XMM0, RBM_XMM1, RBM_XMM2, RBM_XMM3, RBM_XMM4, RBM_XMM5, RBM_XMM6, RBM_XMM7 }; #else // !UNIX_AMD64_ABI const regNumber intArgRegs [] = { REG_ECX, REG_EDX, REG_R8, REG_R9 }; const regMaskTP intArgMasks[] = { RBM_ECX, RBM_EDX, RBM_R8, RBM_R9 }; const regNumber fltArgRegs [] = { REG_XMM0, REG_XMM1, REG_XMM2, REG_XMM3 }; const regMaskTP fltArgMasks[] = { RBM_XMM0, RBM_XMM1, RBM_XMM2, RBM_XMM3 }; #endif // !UNIX_AMD64_ABI // clang-format on #endif // TARGET_AMD64
-1
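The tables in targetamd64.cpp above pin down the fixed argument registers for the two AMD64 conventions: System V (UNIX_AMD64_ABI) passes the first six integer arguments in RDI, RSI, RDX, RCX, R8, R9 and the first eight floating-point arguments in XMM0-XMM7, while Windows x64 shares four positional slots between RCX, RDX, R8, R9 and XMM0-XMM3, with further arguments going to the stack. A hypothetical helper showing how such a parallel table is typically consumed (the enum values, helper name, and REG_NA sentinel are illustrative stand-ins, not the JIT's real definitions):

    #include <cstddef>

    // Illustrative stand-ins for the JIT's regNumber table above.
    enum regNumber { REG_ECX, REG_EDX, REG_R8, REG_R9, REG_NA };

    static const regNumber winX64IntArgRegs[] = { REG_ECX, REG_EDX, REG_R8, REG_R9 };

    // Map the i-th fixed integer argument to its register, or REG_NA once
    // the convention runs out of register slots (stack from then on).
    regNumber IntArgReg(std::size_t argIndex)
    {
        constexpr std::size_t count =
            sizeof(winX64IntArgRegs) / sizeof(winX64IntArgRegs[0]);
        return argIndex < count ? winX64IntArgRegs[argIndex] : REG_NA;
    }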
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/pal/prebuilt/inc/corprof.h
/* this ALWAYS GENERATED file contains the definitions for the interfaces */ /* File created by MIDL compiler version 8.01.0622 */ /* Compiler settings for corprof.idl: Oicf, W1, Zp8, env=Win64 (32b run), target_arch=AMD64 8.01.0622 protocol : dce , ms_ext, c_ext, robust error checks: allocation ref bounds_check enum stub_data VC __declspec() decoration level: __declspec(uuid()), __declspec(selectany), __declspec(novtable) DECLSPEC_UUID(), MIDL_INTERFACE() */ /* @@MIDL_FILE_HEADING( ) */ #pragma warning( disable: 4049 ) /* more than 64k source lines */ /* verify that the <rpcndr.h> version is high enough to compile this file*/ #ifndef __REQUIRED_RPCNDR_H_VERSION__ #define __REQUIRED_RPCNDR_H_VERSION__ 475 #endif #include "rpc.h" #include "rpcndr.h" #ifndef __RPCNDR_H_VERSION__ #error this stub requires an updated version of <rpcndr.h> #endif /* __RPCNDR_H_VERSION__ */ #ifndef COM_NO_WINDOWS_H #include "windows.h" #include "ole2.h" #endif /*COM_NO_WINDOWS_H*/ #ifndef __corprof_h__ #define __corprof_h__ #if defined(_MSC_VER) && (_MSC_VER >= 1020) #pragma once #endif /* Forward Declarations */ #ifndef __ICorProfilerCallback_FWD_DEFINED__ #define __ICorProfilerCallback_FWD_DEFINED__ typedef interface ICorProfilerCallback ICorProfilerCallback; #endif /* __ICorProfilerCallback_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback2_FWD_DEFINED__ #define __ICorProfilerCallback2_FWD_DEFINED__ typedef interface ICorProfilerCallback2 ICorProfilerCallback2; #endif /* __ICorProfilerCallback2_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback3_FWD_DEFINED__ #define __ICorProfilerCallback3_FWD_DEFINED__ typedef interface ICorProfilerCallback3 ICorProfilerCallback3; #endif /* __ICorProfilerCallback3_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback4_FWD_DEFINED__ #define __ICorProfilerCallback4_FWD_DEFINED__ typedef interface ICorProfilerCallback4 ICorProfilerCallback4; #endif /* __ICorProfilerCallback4_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback5_FWD_DEFINED__ #define __ICorProfilerCallback5_FWD_DEFINED__ typedef interface ICorProfilerCallback5 ICorProfilerCallback5; #endif /* __ICorProfilerCallback5_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback6_FWD_DEFINED__ #define __ICorProfilerCallback6_FWD_DEFINED__ typedef interface ICorProfilerCallback6 ICorProfilerCallback6; #endif /* __ICorProfilerCallback6_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback7_FWD_DEFINED__ #define __ICorProfilerCallback7_FWD_DEFINED__ typedef interface ICorProfilerCallback7 ICorProfilerCallback7; #endif /* __ICorProfilerCallback7_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback8_FWD_DEFINED__ #define __ICorProfilerCallback8_FWD_DEFINED__ typedef interface ICorProfilerCallback8 ICorProfilerCallback8; #endif /* __ICorProfilerCallback8_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback9_FWD_DEFINED__ #define __ICorProfilerCallback9_FWD_DEFINED__ typedef interface ICorProfilerCallback9 ICorProfilerCallback9; #endif /* __ICorProfilerCallback9_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback10_FWD_DEFINED__ #define __ICorProfilerCallback10_FWD_DEFINED__ typedef interface ICorProfilerCallback10 ICorProfilerCallback10; #endif /* __ICorProfilerCallback10_FWD_DEFINED__ */ #ifndef __ICorProfilerCallback11_FWD_DEFINED__ #define __ICorProfilerCallback11_FWD_DEFINED__ typedef interface ICorProfilerCallback11 ICorProfilerCallback11; #endif /* __ICorProfilerCallback11_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo_FWD_DEFINED__ #define __ICorProfilerInfo_FWD_DEFINED__ typedef interface ICorProfilerInfo ICorProfilerInfo; #endif /* 
__ICorProfilerInfo_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo2_FWD_DEFINED__ #define __ICorProfilerInfo2_FWD_DEFINED__ typedef interface ICorProfilerInfo2 ICorProfilerInfo2; #endif /* __ICorProfilerInfo2_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo3_FWD_DEFINED__ #define __ICorProfilerInfo3_FWD_DEFINED__ typedef interface ICorProfilerInfo3 ICorProfilerInfo3; #endif /* __ICorProfilerInfo3_FWD_DEFINED__ */ #ifndef __ICorProfilerObjectEnum_FWD_DEFINED__ #define __ICorProfilerObjectEnum_FWD_DEFINED__ typedef interface ICorProfilerObjectEnum ICorProfilerObjectEnum; #endif /* __ICorProfilerObjectEnum_FWD_DEFINED__ */ #ifndef __ICorProfilerFunctionEnum_FWD_DEFINED__ #define __ICorProfilerFunctionEnum_FWD_DEFINED__ typedef interface ICorProfilerFunctionEnum ICorProfilerFunctionEnum; #endif /* __ICorProfilerFunctionEnum_FWD_DEFINED__ */ #ifndef __ICorProfilerModuleEnum_FWD_DEFINED__ #define __ICorProfilerModuleEnum_FWD_DEFINED__ typedef interface ICorProfilerModuleEnum ICorProfilerModuleEnum; #endif /* __ICorProfilerModuleEnum_FWD_DEFINED__ */ #ifndef __IMethodMalloc_FWD_DEFINED__ #define __IMethodMalloc_FWD_DEFINED__ typedef interface IMethodMalloc IMethodMalloc; #endif /* __IMethodMalloc_FWD_DEFINED__ */ #ifndef __ICorProfilerFunctionControl_FWD_DEFINED__ #define __ICorProfilerFunctionControl_FWD_DEFINED__ typedef interface ICorProfilerFunctionControl ICorProfilerFunctionControl; #endif /* __ICorProfilerFunctionControl_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo4_FWD_DEFINED__ #define __ICorProfilerInfo4_FWD_DEFINED__ typedef interface ICorProfilerInfo4 ICorProfilerInfo4; #endif /* __ICorProfilerInfo4_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo5_FWD_DEFINED__ #define __ICorProfilerInfo5_FWD_DEFINED__ typedef interface ICorProfilerInfo5 ICorProfilerInfo5; #endif /* __ICorProfilerInfo5_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo6_FWD_DEFINED__ #define __ICorProfilerInfo6_FWD_DEFINED__ typedef interface ICorProfilerInfo6 ICorProfilerInfo6; #endif /* __ICorProfilerInfo6_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo7_FWD_DEFINED__ #define __ICorProfilerInfo7_FWD_DEFINED__ typedef interface ICorProfilerInfo7 ICorProfilerInfo7; #endif /* __ICorProfilerInfo7_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo8_FWD_DEFINED__ #define __ICorProfilerInfo8_FWD_DEFINED__ typedef interface ICorProfilerInfo8 ICorProfilerInfo8; #endif /* __ICorProfilerInfo8_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo9_FWD_DEFINED__ #define __ICorProfilerInfo9_FWD_DEFINED__ typedef interface ICorProfilerInfo9 ICorProfilerInfo9; #endif /* __ICorProfilerInfo9_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo10_FWD_DEFINED__ #define __ICorProfilerInfo10_FWD_DEFINED__ typedef interface ICorProfilerInfo10 ICorProfilerInfo10; #endif /* __ICorProfilerInfo10_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo11_FWD_DEFINED__ #define __ICorProfilerInfo11_FWD_DEFINED__ typedef interface ICorProfilerInfo11 ICorProfilerInfo11; #endif /* __ICorProfilerInfo11_FWD_DEFINED__ */ #ifndef __ICorProfilerInfo12_FWD_DEFINED__ #define __ICorProfilerInfo12_FWD_DEFINED__ typedef interface ICorProfilerInfo12 ICorProfilerInfo12; #endif /* __ICorProfilerInfo12_FWD_DEFINED__ */ #ifndef __ICorProfilerMethodEnum_FWD_DEFINED__ #define __ICorProfilerMethodEnum_FWD_DEFINED__ typedef interface ICorProfilerMethodEnum ICorProfilerMethodEnum; #endif /* __ICorProfilerMethodEnum_FWD_DEFINED__ */ #ifndef __ICorProfilerThreadEnum_FWD_DEFINED__ #define __ICorProfilerThreadEnum_FWD_DEFINED__ typedef interface ICorProfilerThreadEnum ICorProfilerThreadEnum; #endif /* 
__ICorProfilerThreadEnum_FWD_DEFINED__ */ #ifndef __ICorProfilerAssemblyReferenceProvider_FWD_DEFINED__ #define __ICorProfilerAssemblyReferenceProvider_FWD_DEFINED__ typedef interface ICorProfilerAssemblyReferenceProvider ICorProfilerAssemblyReferenceProvider; #endif /* __ICorProfilerAssemblyReferenceProvider_FWD_DEFINED__ */ /* header files for imported files */ #include "unknwn.h" #ifdef __cplusplus extern "C"{ #endif /* interface __MIDL_itf_corprof_0000_0000 */ /* [local] */ #define CorDB_CONTROL_Profiling "Cor_Enable_Profiling" #define CorDB_CONTROL_ProfilingL L"Cor_Enable_Profiling" #if 0 typedef LONG32 mdToken; typedef mdToken mdModule; typedef mdToken mdTypeDef; typedef mdToken mdMethodDef; typedef mdToken mdFieldDef; typedef ULONG CorElementType; typedef /* [public][public][public][public] */ struct __MIDL___MIDL_itf_corprof_0000_0000_0001 { DWORD dwOSPlatformId; DWORD dwOSMajorVersion; DWORD dwOSMinorVersion; } OSINFO; typedef /* [public][public][public] */ struct __MIDL___MIDL_itf_corprof_0000_0000_0002 { USHORT usMajorVersion; USHORT usMinorVersion; USHORT usBuildNumber; USHORT usRevisionNumber; LPWSTR szLocale; ULONG cbLocale; DWORD *rProcessor; ULONG ulProcessor; OSINFO *rOS; ULONG ulOS; } ASSEMBLYMETADATA; #endif typedef const BYTE *LPCBYTE; typedef BYTE *LPBYTE; typedef BYTE COR_SIGNATURE; typedef COR_SIGNATURE *PCOR_SIGNATURE; typedef const COR_SIGNATURE *PCCOR_SIGNATURE; #ifndef _COR_IL_MAP #define _COR_IL_MAP typedef struct _COR_IL_MAP { ULONG32 oldOffset; ULONG32 newOffset; BOOL fAccurate; } COR_IL_MAP; #endif //_COR_IL_MAP #ifndef _COR_DEBUG_IL_TO_NATIVE_MAP_ #define _COR_DEBUG_IL_TO_NATIVE_MAP_ typedef enum CorDebugIlToNativeMappingTypes { NO_MAPPING = -1, PROLOG = -2, EPILOG = -3 } CorDebugIlToNativeMappingTypes; typedef struct COR_DEBUG_IL_TO_NATIVE_MAP { ULONG32 ilOffset; ULONG32 nativeStartOffset; ULONG32 nativeEndOffset; } COR_DEBUG_IL_TO_NATIVE_MAP; #endif // _COR_DEBUG_IL_TO_NATIVE_MAP_ #ifndef _COR_FIELD_OFFSET_ #define _COR_FIELD_OFFSET_ typedef struct _COR_FIELD_OFFSET { mdFieldDef ridOfField; ULONG ulOffset; } COR_FIELD_OFFSET; #endif // _COR_FIELD_OFFSET_ typedef UINT_PTR ProcessID; typedef UINT_PTR AssemblyID; typedef UINT_PTR AppDomainID; typedef UINT_PTR ModuleID; typedef UINT_PTR ClassID; typedef UINT_PTR ThreadID; typedef UINT_PTR ContextID; typedef UINT_PTR FunctionID; typedef UINT_PTR ObjectID; typedef UINT_PTR GCHandleID; typedef UINT_PTR COR_PRF_ELT_INFO; typedef UINT_PTR ReJITID; typedef /* [public][public][public][public][public][public][public][public][public][public][public][public][public] */ union __MIDL___MIDL_itf_corprof_0000_0000_0003 { FunctionID functionID; UINT_PTR clientID; } FunctionIDOrClientID; typedef UINT_PTR __stdcall __stdcall FunctionIDMapper( FunctionID funcId, BOOL *pbHookFunction); typedef UINT_PTR __stdcall __stdcall FunctionIDMapper2( FunctionID funcId, void *clientData, BOOL *pbHookFunction); typedef enum _COR_PRF_SNAPSHOT_INFO { COR_PRF_SNAPSHOT_DEFAULT = 0, COR_PRF_SNAPSHOT_REGISTER_CONTEXT = 0x1, COR_PRF_SNAPSHOT_X86_OPTIMIZED = 0x2 } COR_PRF_SNAPSHOT_INFO; typedef UINT_PTR COR_PRF_FRAME_INFO; typedef struct _COR_PRF_FUNCTION_ARGUMENT_RANGE { UINT_PTR startAddress; ULONG length; } COR_PRF_FUNCTION_ARGUMENT_RANGE; typedef struct _COR_PRF_FUNCTION_ARGUMENT_INFO { ULONG numRanges; ULONG totalArgumentSize; COR_PRF_FUNCTION_ARGUMENT_RANGE ranges[ 1 ]; } COR_PRF_FUNCTION_ARGUMENT_INFO; typedef struct _COR_PRF_CODE_INFO { UINT_PTR startAddress; SIZE_T size; } COR_PRF_CODE_INFO; typedef /* [public][public] */ enum 
__MIDL___MIDL_itf_corprof_0000_0000_0004 { COR_PRF_FIELD_NOT_A_STATIC = 0, COR_PRF_FIELD_APP_DOMAIN_STATIC = 0x1, COR_PRF_FIELD_THREAD_STATIC = 0x2, COR_PRF_FIELD_CONTEXT_STATIC = 0x4, COR_PRF_FIELD_RVA_STATIC = 0x8 } COR_PRF_STATIC_TYPE; typedef struct _COR_PRF_FUNCTION { FunctionID functionId; ReJITID reJitId; } COR_PRF_FUNCTION; typedef struct _COR_PRF_ASSEMBLY_REFERENCE_INFO { void *pbPublicKeyOrToken; ULONG cbPublicKeyOrToken; LPCWSTR szName; ASSEMBLYMETADATA *pMetaData; void *pbHashValue; ULONG cbHashValue; DWORD dwAssemblyRefFlags; } COR_PRF_ASSEMBLY_REFERENCE_INFO; typedef struct _COR_PRF_METHOD { ModuleID moduleId; mdMethodDef methodId; } COR_PRF_METHOD; typedef void FunctionEnter( FunctionID funcID); typedef void FunctionLeave( FunctionID funcID); typedef void FunctionTailcall( FunctionID funcID); typedef void FunctionEnter2( FunctionID funcId, UINT_PTR clientData, COR_PRF_FRAME_INFO func, COR_PRF_FUNCTION_ARGUMENT_INFO *argumentInfo); typedef void FunctionLeave2( FunctionID funcId, UINT_PTR clientData, COR_PRF_FRAME_INFO func, COR_PRF_FUNCTION_ARGUMENT_RANGE *retvalRange); typedef void FunctionTailcall2( FunctionID funcId, UINT_PTR clientData, COR_PRF_FRAME_INFO func); typedef void FunctionEnter3( FunctionIDOrClientID functionIDOrClientID); typedef void FunctionLeave3( FunctionIDOrClientID functionIDOrClientID); typedef void FunctionTailcall3( FunctionIDOrClientID functionIDOrClientID); typedef void FunctionEnter3WithInfo( FunctionIDOrClientID functionIDOrClientID, COR_PRF_ELT_INFO eltInfo); typedef void FunctionLeave3WithInfo( FunctionIDOrClientID functionIDOrClientID, COR_PRF_ELT_INFO eltInfo); typedef void FunctionTailcall3WithInfo( FunctionIDOrClientID functionIDOrClientID, COR_PRF_ELT_INFO eltInfo); typedef HRESULT __stdcall __stdcall StackSnapshotCallback( FunctionID funcId, UINT_PTR ip, COR_PRF_FRAME_INFO frameInfo, ULONG32 contextSize, BYTE context[ ], void *clientData); typedef BOOL ObjectReferenceCallback( ObjectID root, ObjectID *reference, void *clientData); typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0005 { COR_PRF_MONITOR_NONE = 0, COR_PRF_MONITOR_FUNCTION_UNLOADS = 0x1, COR_PRF_MONITOR_CLASS_LOADS = 0x2, COR_PRF_MONITOR_MODULE_LOADS = 0x4, COR_PRF_MONITOR_ASSEMBLY_LOADS = 0x8, COR_PRF_MONITOR_APPDOMAIN_LOADS = 0x10, COR_PRF_MONITOR_JIT_COMPILATION = 0x20, COR_PRF_MONITOR_EXCEPTIONS = 0x40, COR_PRF_MONITOR_GC = 0x80, COR_PRF_MONITOR_OBJECT_ALLOCATED = 0x100, COR_PRF_MONITOR_THREADS = 0x200, COR_PRF_MONITOR_REMOTING = 0x400, COR_PRF_MONITOR_CODE_TRANSITIONS = 0x800, COR_PRF_MONITOR_ENTERLEAVE = 0x1000, COR_PRF_MONITOR_CCW = 0x2000, COR_PRF_MONITOR_REMOTING_COOKIE = ( 0x4000 | COR_PRF_MONITOR_REMOTING ) , COR_PRF_MONITOR_REMOTING_ASYNC = ( 0x8000 | COR_PRF_MONITOR_REMOTING ) , COR_PRF_MONITOR_SUSPENDS = 0x10000, COR_PRF_MONITOR_CACHE_SEARCHES = 0x20000, COR_PRF_ENABLE_REJIT = 0x40000, COR_PRF_ENABLE_INPROC_DEBUGGING = 0x80000, COR_PRF_ENABLE_JIT_MAPS = 0x100000, COR_PRF_DISABLE_INLINING = 0x200000, COR_PRF_DISABLE_OPTIMIZATIONS = 0x400000, COR_PRF_ENABLE_OBJECT_ALLOCATED = 0x800000, COR_PRF_MONITOR_CLR_EXCEPTIONS = 0x1000000, COR_PRF_MONITOR_ALL = 0x107ffff, COR_PRF_ENABLE_FUNCTION_ARGS = 0x2000000, COR_PRF_ENABLE_FUNCTION_RETVAL = 0x4000000, COR_PRF_ENABLE_FRAME_INFO = 0x8000000, COR_PRF_ENABLE_STACK_SNAPSHOT = 0x10000000, COR_PRF_USE_PROFILE_IMAGES = 0x20000000, COR_PRF_DISABLE_TRANSPARENCY_CHECKS_UNDER_FULL_TRUST = 0x40000000, COR_PRF_DISABLE_ALL_NGEN_IMAGES = 0x80000000, COR_PRF_ALL = 0x8fffffff, COR_PRF_REQUIRE_PROFILE_IMAGE = ( ( 
COR_PRF_USE_PROFILE_IMAGES | COR_PRF_MONITOR_CODE_TRANSITIONS ) | COR_PRF_MONITOR_ENTERLEAVE ) , COR_PRF_ALLOWABLE_AFTER_ATTACH = ( ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_THREADS | COR_PRF_MONITOR_MODULE_LOADS ) | COR_PRF_MONITOR_ASSEMBLY_LOADS ) | COR_PRF_MONITOR_APPDOMAIN_LOADS ) | COR_PRF_ENABLE_STACK_SNAPSHOT ) | COR_PRF_MONITOR_GC ) | COR_PRF_MONITOR_SUSPENDS ) | COR_PRF_MONITOR_CLASS_LOADS ) | COR_PRF_MONITOR_EXCEPTIONS ) | COR_PRF_MONITOR_JIT_COMPILATION ) | COR_PRF_ENABLE_REJIT ) , COR_PRF_ALLOWABLE_NOTIFICATION_PROFILER = ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_FUNCTION_UNLOADS | COR_PRF_MONITOR_CLASS_LOADS ) | COR_PRF_MONITOR_MODULE_LOADS ) | COR_PRF_MONITOR_ASSEMBLY_LOADS ) | COR_PRF_MONITOR_APPDOMAIN_LOADS ) | COR_PRF_MONITOR_JIT_COMPILATION ) | COR_PRF_MONITOR_EXCEPTIONS ) | COR_PRF_MONITOR_OBJECT_ALLOCATED ) | COR_PRF_MONITOR_THREADS ) | COR_PRF_MONITOR_CODE_TRANSITIONS ) | COR_PRF_MONITOR_CCW ) | COR_PRF_MONITOR_SUSPENDS ) | COR_PRF_MONITOR_CACHE_SEARCHES ) | COR_PRF_DISABLE_INLINING ) | COR_PRF_DISABLE_OPTIMIZATIONS ) | COR_PRF_ENABLE_OBJECT_ALLOCATED ) | COR_PRF_MONITOR_CLR_EXCEPTIONS ) | COR_PRF_ENABLE_STACK_SNAPSHOT ) | COR_PRF_USE_PROFILE_IMAGES ) | COR_PRF_DISABLE_ALL_NGEN_IMAGES ) , COR_PRF_MONITOR_IMMUTABLE = ( ( ( ( ( ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_CODE_TRANSITIONS | COR_PRF_MONITOR_REMOTING ) | COR_PRF_MONITOR_REMOTING_COOKIE ) | COR_PRF_MONITOR_REMOTING_ASYNC ) | COR_PRF_ENABLE_INPROC_DEBUGGING ) | COR_PRF_ENABLE_JIT_MAPS ) | COR_PRF_DISABLE_OPTIMIZATIONS ) | COR_PRF_DISABLE_INLINING ) | COR_PRF_ENABLE_OBJECT_ALLOCATED ) | COR_PRF_ENABLE_FUNCTION_ARGS ) | COR_PRF_ENABLE_FUNCTION_RETVAL ) | COR_PRF_ENABLE_FRAME_INFO ) | COR_PRF_USE_PROFILE_IMAGES ) | COR_PRF_DISABLE_TRANSPARENCY_CHECKS_UNDER_FULL_TRUST ) | COR_PRF_DISABLE_ALL_NGEN_IMAGES ) } COR_PRF_MONITOR; typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0006 { COR_PRF_HIGH_MONITOR_NONE = 0, COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES = 0x1, COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED = 0x2, COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS = 0x4, COR_PRF_HIGH_DISABLE_TIERED_COMPILATION = 0x8, COR_PRF_HIGH_BASIC_GC = 0x10, COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS = 0x20, COR_PRF_HIGH_REQUIRE_PROFILE_IMAGE = 0, COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED = 0x40, COR_PRF_HIGH_MONITOR_EVENT_PIPE = 0x80, COR_PRF_HIGH_MONITOR_PINNEDOBJECT_ALLOCATED = 0x100, COR_PRF_HIGH_ALLOWABLE_AFTER_ATTACH = ( ( ( ( ( COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED | COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS ) | COR_PRF_HIGH_BASIC_GC ) | COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS ) | COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED ) | COR_PRF_HIGH_MONITOR_EVENT_PIPE ) , COR_PRF_HIGH_ALLOWABLE_NOTIFICATION_PROFILER = ( ( ( ( ( ( COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED | COR_PRF_HIGH_MONITOR_DYNAMIC_FUNCTION_UNLOADS ) | COR_PRF_HIGH_DISABLE_TIERED_COMPILATION ) | COR_PRF_HIGH_BASIC_GC ) | COR_PRF_HIGH_MONITOR_GC_MOVED_OBJECTS ) | COR_PRF_HIGH_MONITOR_LARGEOBJECT_ALLOCATED ) | COR_PRF_HIGH_MONITOR_EVENT_PIPE ) , COR_PRF_HIGH_MONITOR_IMMUTABLE = COR_PRF_HIGH_DISABLE_TIERED_COMPILATION } COR_PRF_HIGH_MONITOR; typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0007 { PROFILER_PARENT_UNKNOWN = 0xfffffffd, PROFILER_GLOBAL_CLASS = 0xfffffffe, PROFILER_GLOBAL_MODULE = 0xffffffff } COR_PRF_MISC; typedef /* [public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0008 { COR_PRF_CACHED_FUNCTION_FOUND = 0, COR_PRF_CACHED_FUNCTION_NOT_FOUND = ( COR_PRF_CACHED_FUNCTION_FOUND + 1 ) } COR_PRF_JIT_CACHE; typedef /* [public][public][public] */ 
enum __MIDL___MIDL_itf_corprof_0000_0000_0009 { COR_PRF_TRANSITION_CALL = 0, COR_PRF_TRANSITION_RETURN = ( COR_PRF_TRANSITION_CALL + 1 ) } COR_PRF_TRANSITION_REASON; typedef /* [public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0010 { COR_PRF_SUSPEND_OTHER = 0, COR_PRF_SUSPEND_FOR_GC = 1, COR_PRF_SUSPEND_FOR_APPDOMAIN_SHUTDOWN = 2, COR_PRF_SUSPEND_FOR_CODE_PITCHING = 3, COR_PRF_SUSPEND_FOR_SHUTDOWN = 4, COR_PRF_SUSPEND_FOR_INPROC_DEBUGGER = 6, COR_PRF_SUSPEND_FOR_GC_PREP = 7, COR_PRF_SUSPEND_FOR_REJIT = 8, COR_PRF_SUSPEND_FOR_PROFILER = 9 } COR_PRF_SUSPEND_REASON; typedef /* [public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0011 { COR_PRF_DESKTOP_CLR = 0x1, COR_PRF_CORE_CLR = 0x2 } COR_PRF_RUNTIME_TYPE; typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0012 { COR_PRF_REJIT_BLOCK_INLINING = 0x1, COR_PRF_REJIT_INLINING_CALLBACKS = 0x2 } COR_PRF_REJIT_FLAGS; typedef UINT_PTR EVENTPIPE_PROVIDER; typedef UINT_PTR EVENTPIPE_EVENT; typedef UINT64 EVENTPIPE_SESSION; typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0013 { COR_PRF_EVENTPIPE_OBJECT = 1, COR_PRF_EVENTPIPE_BOOLEAN = 3, COR_PRF_EVENTPIPE_CHAR = 4, COR_PRF_EVENTPIPE_SBYTE = 5, COR_PRF_EVENTPIPE_BYTE = 6, COR_PRF_EVENTPIPE_INT16 = 7, COR_PRF_EVENTPIPE_UINT16 = 8, COR_PRF_EVENTPIPE_INT32 = 9, COR_PRF_EVENTPIPE_UINT32 = 10, COR_PRF_EVENTPIPE_INT64 = 11, COR_PRF_EVENTPIPE_UINT64 = 12, COR_PRF_EVENTPIPE_SINGLE = 13, COR_PRF_EVENTPIPE_DOUBLE = 14, COR_PRF_EVENTPIPE_DECIMAL = 15, COR_PRF_EVENTPIPE_DATETIME = 16, COR_PRF_EVENTPIPE_GUID = 17, COR_PRF_EVENTPIPE_STRING = 18, COR_PRF_EVENTPIPE_ARRAY = 19 } COR_PRF_EVENTPIPE_PARAM_TYPE; typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0000_0014 { COR_PRF_EVENTPIPE_LOGALWAYS = 0, COR_PRF_EVENTPIPE_CRITICAL = 1, COR_PRF_EVENTPIPE_ERROR = 2, COR_PRF_EVENTPIPE_WARNING = 3, COR_PRF_EVENTPIPE_INFORMATIONAL = 4, COR_PRF_EVENTPIPE_VERBOSE = 5 } COR_PRF_EVENTPIPE_LEVEL; typedef /* [public][public][public] */ struct __MIDL___MIDL_itf_corprof_0000_0000_0015 { const WCHAR *providerName; UINT64 keywords; UINT32 loggingLevel; const WCHAR *filterData; } COR_PRF_EVENTPIPE_PROVIDER_CONFIG; typedef /* [public][public] */ struct __MIDL___MIDL_itf_corprof_0000_0000_0016 { UINT32 type; UINT32 elementType; const WCHAR *name; } COR_PRF_EVENTPIPE_PARAM_DESC; typedef /* [public][public] */ struct __MIDL___MIDL_itf_corprof_0000_0000_0017 { UINT64 ptr; UINT32 size; UINT32 reserved; } COR_PRF_EVENT_DATA; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0000_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0000_v0_0_s_ifspec; #ifndef __ICorProfilerCallback_INTERFACE_DEFINED__ #define __ICorProfilerCallback_INTERFACE_DEFINED__ /* interface ICorProfilerCallback */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("176FBED1-A55C-4796-98CA-A9DA0EF883E7") ICorProfilerCallback : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE Initialize( /* [in] */ IUnknown *pICorProfilerInfoUnk) = 0; virtual HRESULT STDMETHODCALLTYPE Shutdown( void) = 0; virtual HRESULT STDMETHODCALLTYPE AppDomainCreationStarted( /* [in] */ AppDomainID appDomainId) = 0; virtual HRESULT STDMETHODCALLTYPE AppDomainCreationFinished( /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE AppDomainShutdownStarted( /* [in] */ AppDomainID appDomainId) = 0; virtual HRESULT STDMETHODCALLTYPE AppDomainShutdownFinished( /* [in] */ AppDomainID appDomainId, /* 
[in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE AssemblyLoadStarted( /* [in] */ AssemblyID assemblyId) = 0; virtual HRESULT STDMETHODCALLTYPE AssemblyLoadFinished( /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE AssemblyUnloadStarted( /* [in] */ AssemblyID assemblyId) = 0; virtual HRESULT STDMETHODCALLTYPE AssemblyUnloadFinished( /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE ModuleLoadStarted( /* [in] */ ModuleID moduleId) = 0; virtual HRESULT STDMETHODCALLTYPE ModuleLoadFinished( /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE ModuleUnloadStarted( /* [in] */ ModuleID moduleId) = 0; virtual HRESULT STDMETHODCALLTYPE ModuleUnloadFinished( /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE ModuleAttachedToAssembly( /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId) = 0; virtual HRESULT STDMETHODCALLTYPE ClassLoadStarted( /* [in] */ ClassID classId) = 0; virtual HRESULT STDMETHODCALLTYPE ClassLoadFinished( /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE ClassUnloadStarted( /* [in] */ ClassID classId) = 0; virtual HRESULT STDMETHODCALLTYPE ClassUnloadFinished( /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus) = 0; virtual HRESULT STDMETHODCALLTYPE FunctionUnloadStarted( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE JITCompilationStarted( /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock) = 0; virtual HRESULT STDMETHODCALLTYPE JITCompilationFinished( /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock) = 0; virtual HRESULT STDMETHODCALLTYPE JITCachedFunctionSearchStarted( /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction) = 0; virtual HRESULT STDMETHODCALLTYPE JITCachedFunctionSearchFinished( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result) = 0; virtual HRESULT STDMETHODCALLTYPE JITFunctionPitched( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE JITInlining( /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline) = 0; virtual HRESULT STDMETHODCALLTYPE ThreadCreated( /* [in] */ ThreadID threadId) = 0; virtual HRESULT STDMETHODCALLTYPE ThreadDestroyed( /* [in] */ ThreadID threadId) = 0; virtual HRESULT STDMETHODCALLTYPE ThreadAssignedToOSThread( /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingClientInvocationStarted( void) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingClientSendingMessage( /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingClientReceivingReply( /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingClientInvocationFinished( void) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingServerReceivingMessage( /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingServerInvocationStarted( void) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingServerInvocationReturned( void) = 0; virtual HRESULT STDMETHODCALLTYPE RemotingServerSendingReply( /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync) = 0; virtual HRESULT STDMETHODCALLTYPE UnmanagedToManagedTransition( /* [in] */ FunctionID functionId, /* [in] */ 
COR_PRF_TRANSITION_REASON reason) = 0; virtual HRESULT STDMETHODCALLTYPE ManagedToUnmanagedTransition( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason) = 0; virtual HRESULT STDMETHODCALLTYPE RuntimeSuspendStarted( /* [in] */ COR_PRF_SUSPEND_REASON suspendReason) = 0; virtual HRESULT STDMETHODCALLTYPE RuntimeSuspendFinished( void) = 0; virtual HRESULT STDMETHODCALLTYPE RuntimeSuspendAborted( void) = 0; virtual HRESULT STDMETHODCALLTYPE RuntimeResumeStarted( void) = 0; virtual HRESULT STDMETHODCALLTYPE RuntimeResumeFinished( void) = 0; virtual HRESULT STDMETHODCALLTYPE RuntimeThreadSuspended( /* [in] */ ThreadID threadId) = 0; virtual HRESULT STDMETHODCALLTYPE RuntimeThreadResumed( /* [in] */ ThreadID threadId) = 0; virtual HRESULT STDMETHODCALLTYPE MovedReferences( /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE ObjectAllocated( /* [in] */ ObjectID objectId, /* [in] */ ClassID classId) = 0; virtual HRESULT STDMETHODCALLTYPE ObjectsAllocatedByClass( /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE ObjectReferences( /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE RootReferences( /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionThrown( /* [in] */ ObjectID thrownObjectId) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionSearchFunctionEnter( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionSearchFunctionLeave( void) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionSearchFilterEnter( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionSearchFilterLeave( void) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionSearchCatcherFound( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionOSHandlerEnter( /* [in] */ UINT_PTR __unused) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionOSHandlerLeave( /* [in] */ UINT_PTR __unused) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionUnwindFunctionEnter( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionUnwindFunctionLeave( void) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionUnwindFinallyEnter( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionUnwindFinallyLeave( void) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionCatcherEnter( /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionCatcherLeave( void) = 0; virtual HRESULT STDMETHODCALLTYPE COMClassicVTableCreated( /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots) = 0; virtual HRESULT STDMETHODCALLTYPE COMClassicVTableDestroyed( /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionCLRCatcherFound( void) = 0; virtual HRESULT STDMETHODCALLTYPE ExceptionCLRCatcherExecute( void) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallbackVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback * 
This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId); HRESULT ( 
STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( 
ICorProfilerCallback * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback * This); END_INTERFACE } ICorProfilerCallbackVtbl; interface ICorProfilerCallback { CONST_VTBL struct ICorProfilerCallbackVtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> 
AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> 
RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> 
ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerCallback_INTERFACE_DEFINED__ */ /* interface __MIDL_itf_corprof_0000_0001 */ /* [local] */ typedef /* [public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0001_0001 { COR_PRF_GC_ROOT_STACK = 1, COR_PRF_GC_ROOT_FINALIZER = 2, COR_PRF_GC_ROOT_HANDLE = 3, COR_PRF_GC_ROOT_OTHER = 0 } COR_PRF_GC_ROOT_KIND; typedef /* [public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0001_0002 { COR_PRF_GC_ROOT_PINNING = 0x1, COR_PRF_GC_ROOT_WEAKREF = 0x2, COR_PRF_GC_ROOT_INTERIOR = 0x4, COR_PRF_GC_ROOT_REFCOUNTED = 0x8 } COR_PRF_GC_ROOT_FLAGS; typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0001_0003 { COR_PRF_FINALIZER_CRITICAL = 0x1 } COR_PRF_FINALIZER_FLAGS; typedef /* [public][public][public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0001_0004 { COR_PRF_GC_GEN_0 = 0, COR_PRF_GC_GEN_1 = 1, COR_PRF_GC_GEN_2 = 2, COR_PRF_GC_LARGE_OBJECT_HEAP = 3, COR_PRF_GC_PINNED_OBJECT_HEAP = 4 } COR_PRF_GC_GENERATION; typedef struct COR_PRF_GC_GENERATION_RANGE { COR_PRF_GC_GENERATION generation; ObjectID rangeStart; UINT_PTR rangeLength; UINT_PTR rangeLengthReserved; } COR_PRF_GC_GENERATION_RANGE; typedef /* [public][public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0001_0005 { COR_PRF_CLAUSE_NONE = 0, COR_PRF_CLAUSE_FILTER = 1, COR_PRF_CLAUSE_CATCH = 2, COR_PRF_CLAUSE_FINALLY = 3 } COR_PRF_CLAUSE_TYPE; typedef struct COR_PRF_EX_CLAUSE_INFO { COR_PRF_CLAUSE_TYPE clauseType; UINT_PTR programCounter; UINT_PTR framePointer; UINT_PTR shadowStackPointer; } COR_PRF_EX_CLAUSE_INFO; typedef /* [public][public] */ enum __MIDL___MIDL_itf_corprof_0000_0001_0006 { COR_PRF_GC_INDUCED = 1, COR_PRF_GC_OTHER = 0 } COR_PRF_GC_REASON; typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0001_0007 { COR_PRF_MODULE_DISK = 0x1, COR_PRF_MODULE_NGEN = 0x2, COR_PRF_MODULE_DYNAMIC = 0x4, COR_PRF_MODULE_COLLECTIBLE = 0x8, COR_PRF_MODULE_RESOURCE = 0x10, 
COR_PRF_MODULE_FLAT_LAYOUT = 0x20, COR_PRF_MODULE_WINDOWS_RUNTIME = 0x40 } COR_PRF_MODULE_FLAGS; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0001_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0001_v0_0_s_ifspec; #ifndef __ICorProfilerCallback2_INTERFACE_DEFINED__ #define __ICorProfilerCallback2_INTERFACE_DEFINED__ /* interface ICorProfilerCallback2 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback2; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("8A8CC829-CCF2-49fe-BBAE-0F022228071A") ICorProfilerCallback2 : public ICorProfilerCallback { public: virtual HRESULT STDMETHODCALLTYPE ThreadNameChanged( /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GarbageCollectionStarted( /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason) = 0; virtual HRESULT STDMETHODCALLTYPE SurvivingReferences( /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GarbageCollectionFinished( void) = 0; virtual HRESULT STDMETHODCALLTYPE FinalizeableObjectQueued( /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID) = 0; virtual HRESULT STDMETHODCALLTYPE RootReferences2( /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE HandleCreated( /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId) = 0; virtual HRESULT STDMETHODCALLTYPE HandleDestroyed( /* [in] */ GCHandleID handleId) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback2Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback2 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback2 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback2 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( 
STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback2 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback2 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT 
( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback2 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback2 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback2 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback2 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback2 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback2 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback2 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback2 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback2 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave 
)( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback2 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback2 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback2 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback2 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback2 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback2 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback2 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback2 * This, /* [in] */ GCHandleID handleId); END_INTERFACE } ICorProfilerCallback2Vtbl; interface ICorProfilerCallback2 { CONST_VTBL struct ICorProfilerCallback2Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback2_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback2_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback2_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback2_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback2_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback2_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback2_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback2_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback2_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback2_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback2_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> 
AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback2_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback2_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback2_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback2_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback2_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback2_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback2_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback2_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback2_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback2_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback2_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback2_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback2_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback2_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback2_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback2_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback2_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback2_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback2_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback2_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback2_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback2_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback2_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback2_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define 
ICorProfilerCallback2_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback2_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback2_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback2_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback2_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback2_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback2_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback2_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback2_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback2_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback2_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback2_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback2_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback2_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback2_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback2_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback2_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback2_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback2_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback2_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback2_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback2_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback2_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> 
ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback2_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback2_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback2_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback2_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback2_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback2_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback2_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback2_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback2_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback2_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback2_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback2_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback2_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback2_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback2_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback2_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback2_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback2_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerCallback2_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerCallback3_INTERFACE_DEFINED__ #define __ICorProfilerCallback3_INTERFACE_DEFINED__ /* interface ICorProfilerCallback3 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback3; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("4FD2ED52-7731-4b8d-9469-03D2CC3086C5") ICorProfilerCallback3 : public ICorProfilerCallback2 { 
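/* ---------------------------------------------------------------------------
 * Illustrative sketch (not part of the MIDL-generated declarations): how a
 * profiler might support attach-to-a-running-process via the three methods
 * ICorProfilerCallback3 adds below.  The class name MyAttachProfiler, the
 * member m_info, and the chosen event mask are hypothetical; a real profiler
 * must also override IUnknown and every callback inherited from
 * ICorProfilerCallback and ICorProfilerCallback2 (typically returning S_OK),
 * which is elided here for brevity.
 *
 *   #include <corprof.h>
 *
 *   class MyAttachProfiler : public ICorProfilerCallback3
 *   {
 *       ICorProfilerInfo3 *m_info = nullptr;   // acquired at attach time
 *
 *   public:
 *       // Called instead of Initialize() when the profiler is attached to
 *       // an already-running process; pvClientData carries whatever blob
 *       // the trigger process supplied when requesting the attach.
 *       HRESULT STDMETHODCALLTYPE InitializeForAttach(
 *           IUnknown *pCorProfilerInfoUnk,
 *           void *pvClientData, UINT cbClientData) override
 *       {
 *           HRESULT hr = pCorProfilerInfoUnk->QueryInterface(
 *               IID_ICorProfilerInfo3, (void **)&m_info);
 *           if (FAILED(hr))
 *               return hr;
 *           // Only the attach-capable subset of COR_PRF_MONITOR_* flags
 *           // may be requested here; module-load monitoring is one
 *           // commonly attach-safe choice.
 *           return m_info->SetEventMask(COR_PRF_MONITOR_MODULE_LOADS);
 *       }
 *
 *       // Signals that the runtime has finished replaying catch-up
 *       // notifications (e.g. ModuleLoadFinished for already-loaded
 *       // modules) after the attach completed.
 *       HRESULT STDMETHODCALLTYPE ProfilerAttachComplete() override
 *       {
 *           return S_OK;
 *       }
 *
 *       // Last callback before the profiler DLL is unloaded on detach;
 *       // release any runtime interfaces held.
 *       HRESULT STDMETHODCALLTYPE ProfilerDetachSucceeded() override
 *       {
 *           if (m_info) { m_info->Release(); m_info = nullptr; }
 *           return S_OK;
 *       }
 *
 *       // ... IUnknown plus all ICorProfilerCallback/2 overrides elided ...
 *   };
 * ------------------------------------------------------------------------- */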
public: virtual HRESULT STDMETHODCALLTYPE InitializeForAttach( /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData) = 0; virtual HRESULT STDMETHODCALLTYPE ProfilerAttachComplete( void) = 0; virtual HRESULT STDMETHODCALLTYPE ProfilerDetachSucceeded( void) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback3Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback3 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback3 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback3 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, 
/* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback3 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback3 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback3 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback3 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback3 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback3 * This, /* [in] */ ULONG 
cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback3 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback3 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback3 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback3 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback3 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback3 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback3 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback3 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback3 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback3 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback3 * This, /* 
[in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback3 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback3 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback3 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback3 * This); END_INTERFACE } ICorProfilerCallback3Vtbl; interface ICorProfilerCallback3 { CONST_VTBL struct ICorProfilerCallback3Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback3_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback3_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback3_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback3_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback3_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback3_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback3_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback3_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback3_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback3_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback3_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback3_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback3_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback3_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback3_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback3_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback3_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback3_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback3_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback3_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> 
ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback3_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback3_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback3_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback3_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback3_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback3_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback3_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback3_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback3_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback3_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback3_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback3_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback3_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback3_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback3_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback3_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback3_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback3_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback3_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback3_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback3_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback3_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback3_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback3_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define 
ICorProfilerCallback3_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback3_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback3_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback3_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback3_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback3_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback3_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback3_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback3_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback3_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback3_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback3_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback3_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback3_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback3_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback3_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback3_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback3_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback3_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback3_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback3_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback3_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback3_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback3_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback3_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define 
ICorProfilerCallback3_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
    ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) 

#define ICorProfilerCallback3_ExceptionCLRCatcherFound(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) 

#define ICorProfilerCallback3_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) 

#define ICorProfilerCallback3_ThreadNameChanged(This,threadId,cchName,name) \
    ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) 

#define ICorProfilerCallback3_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
    ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) 

#define ICorProfilerCallback3_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) 

#define ICorProfilerCallback3_GarbageCollectionFinished(This) \
    ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) 

#define ICorProfilerCallback3_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
    ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) 

#define ICorProfilerCallback3_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
    ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) 

#define ICorProfilerCallback3_HandleCreated(This,handleId,initialObjectId) \
    ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) 

#define ICorProfilerCallback3_HandleDestroyed(This,handleId) \
    ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) 

#define ICorProfilerCallback3_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
    ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) 

#define ICorProfilerCallback3_ProfilerAttachComplete(This) \
    ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) 

#define ICorProfilerCallback3_ProfilerDetachSucceeded(This) \
    ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) 

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerCallback3_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerCallback4_INTERFACE_DEFINED__
#define __ICorProfilerCallback4_INTERFACE_DEFINED__

/* interface ICorProfilerCallback4 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerCallback4;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("7B63B2E3-107D-4d48-B2F6-F61E229470D2")
    ICorProfilerCallback4 : public ICorProfilerCallback3
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE ReJITCompilationStarted( 
            /* [in] */ FunctionID functionId,
            /* [in] */ ReJITID rejitId,
            /* [in] */ BOOL fIsSafeToBlock) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetReJITParameters( 
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdMethodDef methodId,
            /* [in] */ ICorProfilerFunctionControl *pFunctionControl) = 0;

        virtual HRESULT STDMETHODCALLTYPE ReJITCompilationFinished( 
            /* [in] */ FunctionID functionId,
            /* [in] */ ReJITID rejitId,
            /* [in] */ HRESULT hrStatus,
            /* [in] */ BOOL fIsSafeToBlock) = 0;

        virtual HRESULT STDMETHODCALLTYPE ReJITError( 
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdMethodDef methodId,
            /* [in] */ FunctionID functionId,
            /* [in] */ HRESULT hrStatus) = 0;

        virtual HRESULT STDMETHODCALLTYPE MovedReferences2( 
            /* [in] */ ULONG cMovedObjectIDRanges,
            /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ],
            /* [size_is][in] */ ObjectID
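
/* Illustrative note: ICorProfilerCallback4 extends ICorProfilerCallback3 with
 * the ReJIT lifecycle (ReJITCompilationStarted, GetReJITParameters,
 * ReJITCompilationFinished, ReJITError) and with MovedReferences2 /
 * SurvivingReferences2, whose SIZE_T range lengths avoid the ULONG truncation
 * of the original callbacks on 64-bit. A minimal sketch of a profiler's
 * GetReJITParameters override, assuming pNewIL/cbNewIL are hypothetical
 * fields holding an already-built instrumented method body:
 *
 *   HRESULT MyProfiler::GetReJITParameters(ModuleID moduleId,
 *       mdMethodDef methodId, ICorProfilerFunctionControl *pFunctionControl)
 *   {
 *       // Supply replacement IL for this (module, methodDef) pair; the CLR
 *       // compiles it in place of the original body for the requested ReJIT.
 *       return pFunctionControl->SetILFunctionBody(cbNewIL, pNewIL);
 *   }
 */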
newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE SurvivingReferences2( /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback4Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback4 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback4 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback4 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL 
fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback4 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback4 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback4 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback4 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback4 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback4 * This, /* [in] */ ULONG cClassCount, /* 
[size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback4 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback4 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback4 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback4 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback4 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback4 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback4 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback4 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback4 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback4 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback4 * This, /* [in] */ ULONG 
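
/* Illustrative note: the Exception*Enter / Exception*Leave slots above are
 * delivered as strictly nested pairs during the two-pass exception dispatch
 * (search, then unwind), so a per-thread depth counter (incremented in each
 * Enter callback, decremented in the matching Leave) is typically enough to
 * reconstruct the dispatch path. */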
cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback4 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback4 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback4 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback4 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback4 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback4 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); END_INTERFACE } ICorProfilerCallback4Vtbl; interface ICorProfilerCallback4 { CONST_VTBL struct ICorProfilerCallback4Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback4_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback4_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback4_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback4_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback4_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback4_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback4_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback4_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback4_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback4_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define 
ICorProfilerCallback4_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback4_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback4_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback4_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback4_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback4_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback4_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback4_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback4_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback4_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback4_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback4_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback4_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback4_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback4_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback4_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback4_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback4_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback4_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback4_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback4_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback4_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback4_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback4_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback4_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> 
RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback4_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback4_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback4_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback4_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback4_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback4_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback4_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback4_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback4_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback4_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback4_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback4_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback4_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback4_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback4_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback4_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback4_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback4_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback4_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback4_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback4_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback4_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback4_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback4_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback4_ExceptionSearchCatcherFound(This,functionId) \ ( 
(This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback4_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback4_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback4_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback4_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback4_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback4_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback4_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback4_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback4_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback4_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback4_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback4_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback4_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback4_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback4_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback4_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback4_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback4_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback4_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback4_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback4_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback4_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback4_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define 
ICorProfilerCallback4_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) 

#define ICorProfilerCallback4_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
    ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) 

#define ICorProfilerCallback4_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) 

#define ICorProfilerCallback4_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) ) 

#define ICorProfilerCallback4_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) 

#define ICorProfilerCallback4_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) 

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerCallback4_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerCallback5_INTERFACE_DEFINED__
#define __ICorProfilerCallback5_INTERFACE_DEFINED__

/* interface ICorProfilerCallback5 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerCallback5;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("8DFBA405-8C9F-45F8-BFFA-83B14CEF78B5")
    ICorProfilerCallback5 : public ICorProfilerCallback4
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE ConditionalWeakTableElementReferences( 
            /* [in] */ ULONG cRootRefs,
            /* [size_is][in] */ ObjectID keyRefIds[ ],
            /* [size_is][in] */ ObjectID valueRefIds[ ],
            /* [size_is][in] */ GCHandleID rootIds[ ]) = 0;

    };

#else /* C style interface */

    typedef struct ICorProfilerCallback5Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( 
            ICorProfilerCallback5 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */ 
            _COM_Outptr_ void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )( 
            ICorProfilerCallback5 * This);

        ULONG ( STDMETHODCALLTYPE *Release )( 
            ICorProfilerCallback5 * This);

        HRESULT ( STDMETHODCALLTYPE *Initialize )( 
            ICorProfilerCallback5 * This,
            /* [in] */ IUnknown *pICorProfilerInfoUnk);

        HRESULT ( STDMETHODCALLTYPE *Shutdown )( 
            ICorProfilerCallback5 * This);

        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( 
            ICorProfilerCallback5 * This,
            /* [in] */ AppDomainID appDomainId);

        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( 
            ICorProfilerCallback5 * This,
            /* [in] */ AppDomainID appDomainId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( 
            ICorProfilerCallback5 * This,
            /* [in] */ AppDomainID appDomainId);

        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( 
            ICorProfilerCallback5 * This,
            /* [in] */ AppDomainID appDomainId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( 
            ICorProfilerCallback5 * This,
            /* [in] */ AssemblyID assemblyId);

        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( 
            ICorProfilerCallback5 * This,
            /* [in] */ AssemblyID assemblyId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( 
            ICorProfilerCallback5 * This,
            /* [in] */ AssemblyID assemblyId);

        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( 
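
/* Illustrative note: ICorProfilerCallback5 adds a single callback.
 * ConditionalWeakTableElementReferences reports the key -> value
 * dependent-handle edges created by
 * System.Runtime.CompilerServices.ConditionalWeakTable, so heap-graph
 * profilers no longer miss those references. A minimal sketch of an override
 * that records the edges (AddEdge is a hypothetical helper of the profiler):
 *
 *   HRESULT MyProfiler::ConditionalWeakTableElementReferences(
 *       ULONG cRootRefs, ObjectID keyRefIds[], ObjectID valueRefIds[],
 *       GCHandleID rootIds[])
 *   {
 *       for (ULONG i = 0; i < cRootRefs; i++)
 *           AddEdge(keyRefIds[i], valueRefIds[i]); // value lives while key does
 *       return S_OK;
 *   }
 */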
ICorProfilerCallback5 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback5 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback5 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE 
*RemotingServerSendingReply )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback5 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback5 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback5 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback5 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback5 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback5 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback5 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback5 * 
This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback5 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback5 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback5 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback5 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback5 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback5 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback5 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback5 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback5 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback5 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* 
[size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback5 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); END_INTERFACE } ICorProfilerCallback5Vtbl; interface ICorProfilerCallback5 { CONST_VTBL struct ICorProfilerCallback5Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback5_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback5_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback5_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback5_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback5_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback5_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback5_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback5_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback5_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback5_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback5_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback5_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback5_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback5_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback5_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback5_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback5_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback5_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback5_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback5_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback5_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback5_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) 
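
/* Illustrative note: with COBJMACROS defined, the ICorProfilerCallback5_*
 * helpers in this block give C callers the same call shape as C++; for a
 * hypothetical ICorProfilerCallback5 *pCb:
 *
 *   ICorProfilerCallback5_AddRef(pCb);    expands to pCb->lpVtbl->AddRef(pCb)
 *   ICorProfilerCallback5_Shutdown(pCb);  expands to pCb->lpVtbl->Shutdown(pCb)
 *   ICorProfilerCallback5_Release(pCb);
 */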
#define ICorProfilerCallback5_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback5_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback5_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback5_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback5_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback5_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback5_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback5_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback5_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback5_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback5_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback5_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback5_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback5_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback5_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback5_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback5_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback5_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback5_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback5_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback5_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback5_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback5_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback5_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback5_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> 
RuntimeResumeFinished(This) ) #define ICorProfilerCallback5_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback5_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback5_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback5_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback5_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback5_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback5_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback5_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback5_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback5_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback5_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback5_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback5_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback5_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback5_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback5_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback5_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback5_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback5_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback5_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback5_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback5_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback5_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback5_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> 
ExceptionCLRCatcherFound(This) ) 

#define ICorProfilerCallback5_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) 

#define ICorProfilerCallback5_ThreadNameChanged(This,threadId,cchName,name) \
    ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) 

#define ICorProfilerCallback5_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
    ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) 

#define ICorProfilerCallback5_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) 

#define ICorProfilerCallback5_GarbageCollectionFinished(This) \
    ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) 

#define ICorProfilerCallback5_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
    ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) 

#define ICorProfilerCallback5_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
    ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) 

#define ICorProfilerCallback5_HandleCreated(This,handleId,initialObjectId) \
    ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) 

#define ICorProfilerCallback5_HandleDestroyed(This,handleId) \
    ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) 

#define ICorProfilerCallback5_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
    ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) 

#define ICorProfilerCallback5_ProfilerAttachComplete(This) \
    ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) 

#define ICorProfilerCallback5_ProfilerDetachSucceeded(This) \
    ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) 

#define ICorProfilerCallback5_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) 

#define ICorProfilerCallback5_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
    ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) 

#define ICorProfilerCallback5_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) 

#define ICorProfilerCallback5_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) ) 

#define ICorProfilerCallback5_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) 

#define ICorProfilerCallback5_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) 

#define ICorProfilerCallback5_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
    ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) ) 

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerCallback5_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerCallback6_INTERFACE_DEFINED__
#define __ICorProfilerCallback6_INTERFACE_DEFINED__

/* interface ICorProfilerCallback6 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerCallback6;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("FC13DF4B-4448-4F4F-950C-BA8D19D00C36")
    ICorProfilerCallback6 : public ICorProfilerCallback5
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE GetAssemblyReferences( 
            /* [string][in] */ const WCHAR *wszAssemblyPath,
            /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider) = 0;

    };

#else /* C style interface */

    typedef struct ICorProfilerCallback6Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( 
            ICorProfilerCallback6 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */ 
            _COM_Outptr_ void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )( 
            ICorProfilerCallback6 * This);

        ULONG ( STDMETHODCALLTYPE *Release )( 
            ICorProfilerCallback6 * This);

        HRESULT ( STDMETHODCALLTYPE *Initialize )( 
            ICorProfilerCallback6 * This,
            /* [in] */ IUnknown *pICorProfilerInfoUnk);

        HRESULT ( STDMETHODCALLTYPE *Shutdown )( 
            ICorProfilerCallback6 * This);

        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AppDomainID appDomainId);

        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AppDomainID appDomainId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AppDomainID appDomainId);

        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AppDomainID appDomainId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AssemblyID assemblyId);

        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AssemblyID assemblyId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AssemblyID assemblyId);

        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ AssemblyID assemblyId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ModuleID moduleId);

        HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ModuleID moduleId);

        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ AssemblyID AssemblyId);

        HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ClassID classId);

        HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ClassID classId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ClassID classId);

        HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( 
            ICorProfilerCallback6 * This,
            /* [in] */ ClassID classId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ FunctionID functionId);

        HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( 
            ICorProfilerCallback6 * This,
            /* [in] */ 
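
/* Illustrative note: ICorProfilerCallback6 adds GetAssemblyReferences, which
 * the runtime raises while computing an assembly's reference closure when the
 * profiler has requested COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES, letting
 * IL-rewriting profilers declare the extra references their injected code
 * will need. A minimal sketch, assuming refInfo is a hypothetical
 * COR_PRF_ASSEMBLY_REFERENCE_INFO the profiler filled in beforehand:
 *
 *   HRESULT MyProfiler::GetAssemblyReferences(const WCHAR *wszAssemblyPath,
 *       ICorProfilerAssemblyReferenceProvider *pAsmRefProvider)
 *   {
 *       // Register the instrumentation assembly as an additional reference.
 *       return pAsmRefProvider->AddAssemblyReference(&refInfo);
 *   }
 */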
FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback6 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback6 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback6 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE 
*ObjectAllocated )( ICorProfilerCallback6 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback6 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback6 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback6 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback6 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback6 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback6 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback6 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback6 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback6 * This); HRESULT ( 
STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback6 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback6 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback6 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback6 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback6 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback6 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback6 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback6 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); END_INTERFACE } ICorProfilerCallback6Vtbl; interface ICorProfilerCallback6 { CONST_VTBL struct ICorProfilerCallback6Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback6_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback6_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback6_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback6_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback6_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback6_AppDomainCreationStarted(This,appDomainId) 
\ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback6_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback6_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback6_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback6_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback6_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback6_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback6_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback6_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback6_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback6_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback6_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback6_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback6_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback6_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback6_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback6_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback6_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback6_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback6_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback6_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback6_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback6_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback6_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback6_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define 
ICorProfilerCallback6_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback6_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback6_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback6_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback6_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback6_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback6_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback6_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback6_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback6_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback6_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback6_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback6_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback6_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback6_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback6_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback6_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback6_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback6_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback6_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback6_ExceptionThrown(This,thrownObjectId) \ 
( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback6_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback6_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback6_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback6_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback6_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback6_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback6_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback6_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback6_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback6_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback6_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback6_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback6_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback6_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback6_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback6_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback6_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback6_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback6_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define 
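// --- Illustrative sketch, not part of the generated header ---
// ICorProfilerCallback6::GetAssemblyReferences lets a profiler declare assembly
// references it intends to add later via IL rewriting, before the CLR walks the
// reference closure. A minimal C++ override might look like the following;
// "MyProfiler" and the values placed in COR_PRF_ASSEMBLY_REFERENCE_INFO are
// hypothetical and must be filled in for the assembly the profiler injects.
//
// HRESULT MyProfiler::GetAssemblyReferences(
//     const WCHAR *wszAssemblyPath, ICorProfilerAssemblyReferenceProvider *pAsmRefProvider)
// {
//     COR_PRF_ASSEMBLY_REFERENCE_INFO info = {};  // describes the extra reference
//     // ... populate szName, pbPublicKeyOrToken, pMetaData, etc. here ...
//     return pAsmRefProvider->AddAssemblyReference(&info);
// }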
#ifndef __ICorProfilerCallback7_INTERFACE_DEFINED__
#define __ICorProfilerCallback7_INTERFACE_DEFINED__

/* interface ICorProfilerCallback7 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback7;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("F76A2DBA-1D52-4539-866C-2AA518F9EFC3")
    ICorProfilerCallback7 : public ICorProfilerCallback6
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE ModuleInMemorySymbolsUpdated(
            ModuleID moduleId) = 0;
    };

#else 	/* C style interface */

    typedef struct ICorProfilerCallback7Vtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback7 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback7 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback7 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk);
        HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result);
        HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback7 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline);
        HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback7 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback7 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback7 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback7 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback7 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback7 * This, /* [in] */ ObjectID thrownObjectId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback7 * This, /* [in] */ UINT_PTR __unused);
        HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback7 * This, /* [in] */ UINT_PTR __unused);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback7 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots);
        HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback7 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]);
        HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback7 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback7 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID);
        HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback7 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback7 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId);
        HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback7 * This, /* [in] */ GCHandleID handleId);
        HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback7 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData);
        HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback7 * This);
        HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl);
        HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback7 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback7 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback7 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider);
        HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback7 * This, ModuleID moduleId);
        END_INTERFACE
    } ICorProfilerCallback7Vtbl;

    interface ICorProfilerCallback7
    {
        CONST_VTBL struct ICorProfilerCallback7Vtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerCallback7_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerCallback7_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerCallback7_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerCallback7_Initialize(This,pICorProfilerInfoUnk) \
    ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )
#define ICorProfilerCallback7_Shutdown(This) \
    ( (This)->lpVtbl -> Shutdown(This) )
#define ICorProfilerCallback7_AppDomainCreationStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )
#define ICorProfilerCallback7_AppDomainCreationFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback7_AppDomainShutdownStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )
#define ICorProfilerCallback7_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback7_AssemblyLoadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )
#define ICorProfilerCallback7_AssemblyLoadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback7_AssemblyUnloadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )
#define ICorProfilerCallback7_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback7_ModuleLoadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )
#define ICorProfilerCallback7_ModuleLoadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback7_ModuleUnloadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) )
#define ICorProfilerCallback7_ModuleUnloadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback7_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \
    ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) )
#define ICorProfilerCallback7_ClassLoadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassLoadStarted(This,classId) )
#define ICorProfilerCallback7_ClassLoadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback7_ClassUnloadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) )
#define ICorProfilerCallback7_ClassUnloadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback7_FunctionUnloadStarted(This,functionId) \
    ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) )
#define ICorProfilerCallback7_JITCompilationStarted(This,functionId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) )
#define ICorProfilerCallback7_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback7_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) )
#define ICorProfilerCallback7_JITCachedFunctionSearchFinished(This,functionId,result) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) )
#define ICorProfilerCallback7_JITFunctionPitched(This,functionId) \
    ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) )
#define ICorProfilerCallback7_JITInlining(This,callerId,calleeId,pfShouldInline) \
    ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) )
#define ICorProfilerCallback7_ThreadCreated(This,threadId) \
    ( (This)->lpVtbl -> ThreadCreated(This,threadId) )
#define ICorProfilerCallback7_ThreadDestroyed(This,threadId) \
    ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) )
#define ICorProfilerCallback7_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \
    ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) )
#define ICorProfilerCallback7_RemotingClientInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) )
#define ICorProfilerCallback7_RemotingClientSendingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback7_RemotingClientReceivingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback7_RemotingClientInvocationFinished(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) )
#define ICorProfilerCallback7_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback7_RemotingServerInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) )
#define ICorProfilerCallback7_RemotingServerInvocationReturned(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) )
#define ICorProfilerCallback7_RemotingServerSendingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback7_UnmanagedToManagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) )
#define ICorProfilerCallback7_ManagedToUnmanagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) )
#define ICorProfilerCallback7_RuntimeSuspendStarted(This,suspendReason) \
    ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) )
#define ICorProfilerCallback7_RuntimeSuspendFinished(This) \
    ( (This)->lpVtbl -> RuntimeSuspendFinished(This) )
#define ICorProfilerCallback7_RuntimeSuspendAborted(This) \
    ( (This)->lpVtbl -> RuntimeSuspendAborted(This) )
#define ICorProfilerCallback7_RuntimeResumeStarted(This) \
    ( (This)->lpVtbl -> RuntimeResumeStarted(This) )
#define ICorProfilerCallback7_RuntimeResumeFinished(This) \
    ( (This)->lpVtbl -> RuntimeResumeFinished(This) )
#define ICorProfilerCallback7_RuntimeThreadSuspended(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) )
#define ICorProfilerCallback7_RuntimeThreadResumed(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) )
#define ICorProfilerCallback7_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback7_ObjectAllocated(This,objectId,classId) \
    ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) )
#define ICorProfilerCallback7_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \
    ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) )
#define ICorProfilerCallback7_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \
    ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) )
#define ICorProfilerCallback7_RootReferences(This,cRootRefs,rootRefIds) \
    ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) )
#define ICorProfilerCallback7_ExceptionThrown(This,thrownObjectId) \
    ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) )
#define ICorProfilerCallback7_ExceptionSearchFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) )
#define ICorProfilerCallback7_ExceptionSearchFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) )
#define ICorProfilerCallback7_ExceptionSearchFilterEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) )
#define ICorProfilerCallback7_ExceptionSearchFilterLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) )
#define ICorProfilerCallback7_ExceptionSearchCatcherFound(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) )
#define ICorProfilerCallback7_ExceptionOSHandlerEnter(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) )
#define ICorProfilerCallback7_ExceptionOSHandlerLeave(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )
#define ICorProfilerCallback7_ExceptionUnwindFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )
#define ICorProfilerCallback7_ExceptionUnwindFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )
#define ICorProfilerCallback7_ExceptionUnwindFinallyEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )
#define ICorProfilerCallback7_ExceptionUnwindFinallyLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )
#define ICorProfilerCallback7_ExceptionCatcherEnter(This,functionId,objectId) \
    ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )
#define ICorProfilerCallback7_ExceptionCatcherLeave(This) \
    ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )
#define ICorProfilerCallback7_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
    ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )
#define ICorProfilerCallback7_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
    ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )
#define ICorProfilerCallback7_ExceptionCLRCatcherFound(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )
#define ICorProfilerCallback7_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )
#define ICorProfilerCallback7_ThreadNameChanged(This,threadId,cchName,name) \
    ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) )
#define ICorProfilerCallback7_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
    ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) )
#define ICorProfilerCallback7_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback7_GarbageCollectionFinished(This) \
    ( (This)->lpVtbl -> GarbageCollectionFinished(This) )
#define ICorProfilerCallback7_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
    ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) )
#define ICorProfilerCallback7_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
    ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) )
#define ICorProfilerCallback7_HandleCreated(This,handleId,initialObjectId) \
    ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) )
#define ICorProfilerCallback7_HandleDestroyed(This,handleId) \
    ( (This)->lpVtbl -> HandleDestroyed(This,handleId) )
#define ICorProfilerCallback7_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
    ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) )
#define ICorProfilerCallback7_ProfilerAttachComplete(This) \
    ( (This)->lpVtbl -> ProfilerAttachComplete(This) )
#define ICorProfilerCallback7_ProfilerDetachSucceeded(This) \
    ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) )
#define ICorProfilerCallback7_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) )
#define ICorProfilerCallback7_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
    ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) )
#define ICorProfilerCallback7_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback7_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )
#define ICorProfilerCallback7_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback7_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback7_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
    ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )
#define ICorProfilerCallback7_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \
    ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) )
#define ICorProfilerCallback7_ModuleInMemorySymbolsUpdated(This,moduleId) \
    ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) )

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerCallback7_INTERFACE_DEFINED__ */
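// --- Illustrative sketch, not part of the generated header ---
// ICorProfilerCallback7::ModuleInMemorySymbolsUpdated fires when the in-memory
// symbol stream for a dynamic module changes, so the profiler can re-read it.
// A minimal C++ override might look like this; "MyProfiler" and its m_pInfo7
// member (an ICorProfilerInfo7* assumed to be captured in Initialize) are
// hypothetical names.
//
// HRESULT MyProfiler::ModuleInMemorySymbolsUpdated(ModuleID moduleId)
// {
//     DWORD cbNeeded = 0;
//     HRESULT hr = m_pInfo7->GetInMemorySymbolsLength(moduleId, &cbNeeded);
//     // ... on success, allocate cbNeeded bytes and call
//     //     ReadInMemorySymbols(moduleId, 0, buffer, cbNeeded, &cbRead) ...
//     return hr;
// }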
#ifndef __ICorProfilerCallback8_INTERFACE_DEFINED__
#define __ICorProfilerCallback8_INTERFACE_DEFINED__

/* interface ICorProfilerCallback8 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback8;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("5BED9B15-C079-4D47-BFE2-215A140C07E0")
    ICorProfilerCallback8 : public ICorProfilerCallback7
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE DynamicMethodJITCompilationStarted(
            /* [in] */ FunctionID functionId,
            /* [in] */ BOOL fIsSafeToBlock,
            /* [in] */ LPCBYTE pILHeader,
            /* [in] */ ULONG cbILHeader) = 0;

        virtual HRESULT STDMETHODCALLTYPE DynamicMethodJITCompilationFinished(
            /* [in] */ FunctionID functionId,
            /* [in] */ HRESULT hrStatus,
            /* [in] */ BOOL fIsSafeToBlock) = 0;
    };

#else 	/* C style interface */

    typedef struct ICorProfilerCallback8Vtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback8 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback8 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback8 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk);
        HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result);
        HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback8 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline);
        HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback8 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback8 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback8 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback8 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback8 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback8 * This, /* [in] */ ObjectID thrownObjectId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback8 * This, /* [in] */ UINT_PTR __unused);
        HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback8 * This, /* [in] */ UINT_PTR __unused);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback8 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots);
        HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback8 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]);
        HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback8 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback8 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID);
        HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback8 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback8 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId);
        HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback8 * This, /* [in] */ GCHandleID handleId);
        HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback8 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData);
        HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback8 * This);
        HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl);
        HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /*
[in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback8 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback8 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback8 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback8 * This, ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); END_INTERFACE } ICorProfilerCallback8Vtbl; interface ICorProfilerCallback8 { CONST_VTBL struct ICorProfilerCallback8Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback8_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback8_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback8_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback8_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback8_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback8_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback8_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback8_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback8_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback8_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback8_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback8_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback8_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback8_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define 
ICorProfilerCallback8_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback8_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback8_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback8_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback8_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback8_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback8_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback8_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback8_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback8_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback8_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback8_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback8_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback8_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback8_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback8_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback8_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback8_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback8_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback8_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback8_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback8_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> 
RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback8_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback8_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback8_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback8_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback8_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback8_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback8_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback8_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback8_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback8_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback8_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback8_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback8_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback8_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback8_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback8_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback8_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback8_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback8_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback8_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback8_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> 
ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback8_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback8_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback8_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback8_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback8_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback8_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback8_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback8_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback8_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback8_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback8_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback8_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback8_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback8_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback8_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback8_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback8_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback8_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback8_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback8_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback8_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define 
ICorProfilerCallback8_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )

#define ICorProfilerCallback8_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )

#define ICorProfilerCallback8_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )

#define ICorProfilerCallback8_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
    ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )

#define ICorProfilerCallback8_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \
    ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) )

#define ICorProfilerCallback8_ModuleInMemorySymbolsUpdated(This,moduleId) \
    ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) )

#define ICorProfilerCallback8_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) )

#define ICorProfilerCallback8_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerCallback8_INTERFACE_DEFINED__ */
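/* Illustrative usage sketch -- not part of the MIDL-generated header. It shows
 * how a C caller drives these interfaces through the COBJMACROS helpers above,
 * which simply forward through lpVtbl; the two calls below are equivalent.
 * `pCallback` is a hypothetical, already-obtained interface pointer. */
#if 0
static void example_use_callback8(ICorProfilerCallback8 *pCallback)
{
    /* Macro form: expands to ( (pCallback)->lpVtbl -> AddRef(pCallback) ). */
    ICorProfilerCallback8_AddRef(pCallback);

    /* Explicit vtable form, doing the same forwarding by hand (and keeping
     * the reference count balanced with the AddRef above). */
    pCallback->lpVtbl->Release(pCallback);
}
#endif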
#ifndef __ICorProfilerCallback9_INTERFACE_DEFINED__
#define __ICorProfilerCallback9_INTERFACE_DEFINED__

/* interface ICorProfilerCallback9 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback9;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("27583EC3-C8F5-482F-8052-194B8CE4705A")
    ICorProfilerCallback9 : public ICorProfilerCallback8
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE DynamicMethodUnloaded(
            /* [in] */ FunctionID functionId) = 0;

    };

#else /* C style interface */

    typedef struct ICorProfilerCallback9Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback9 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback9 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback9 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk);
        HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result);
        HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback9 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline);
        HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback9 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback9 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback9 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback9 * This, /* [in] */ GUID
*pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback9 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback9 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback9 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback9 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback9 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback9 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback9 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback9 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE 
*ExceptionUnwindFinallyEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback9 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback9 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback9 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback9 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback9 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback9 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback9 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback9 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback9 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* 
[in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback9 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback9 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback9 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback9 * This, ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *DynamicMethodUnloaded )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId); END_INTERFACE } ICorProfilerCallback9Vtbl; interface ICorProfilerCallback9 { CONST_VTBL struct ICorProfilerCallback9Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback9_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback9_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback9_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback9_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback9_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback9_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback9_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback9_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback9_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback9_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback9_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback9_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback9_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback9_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> 
ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback9_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback9_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback9_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback9_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback9_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback9_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback9_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback9_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback9_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback9_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback9_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback9_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback9_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback9_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback9_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback9_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback9_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback9_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback9_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback9_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback9_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback9_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback9_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback9_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define 
ICorProfilerCallback9_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback9_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback9_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback9_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback9_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback9_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback9_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback9_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback9_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback9_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback9_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback9_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback9_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback9_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback9_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback9_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback9_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback9_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback9_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback9_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback9_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback9_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback9_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback9_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback9_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define 
ICorProfilerCallback9_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback9_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback9_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback9_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback9_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback9_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback9_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback9_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback9_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback9_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback9_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback9_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback9_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback9_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback9_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback9_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback9_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback9_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback9_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback9_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback9_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback9_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback9_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> 
ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) )

#define ICorProfilerCallback9_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )

#define ICorProfilerCallback9_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )

#define ICorProfilerCallback9_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )

#define ICorProfilerCallback9_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
    ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )

#define ICorProfilerCallback9_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \
    ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) )

#define ICorProfilerCallback9_ModuleInMemorySymbolsUpdated(This,moduleId) \
    ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) )

#define ICorProfilerCallback9_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) )

#define ICorProfilerCallback9_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )

#define ICorProfilerCallback9_DynamicMethodUnloaded(This,functionId) \
    ( (This)->lpVtbl -> DynamicMethodUnloaded(This,functionId) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerCallback9_INTERFACE_DEFINED__ */
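/* Illustrative sketch -- not part of the MIDL-generated header. A C++ profiler
 * that wants the one notification ICorProfilerCallback9 adds over
 * ICorProfilerCallback8 derives from the interface and overrides
 * DynamicMethodUnloaded. `SampleProfiler` is a hypothetical class name, and
 * the remaining inherited callback methods (plus QueryInterface/AddRef/Release)
 * are omitted here for brevity. */
#if 0
class SampleProfiler : public ICorProfilerCallback9
{
public:
    HRESULT STDMETHODCALLTYPE DynamicMethodUnloaded(FunctionID functionId) override
    {
        // The runtime is discarding a dynamic method; drop any profiler state
        // cached under this FunctionID, since the ID may later be reused.
        return S_OK;
    }
    // ... all other ICorProfilerCallback8/9 methods still need bodies ...
};
#endif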
#ifndef __ICorProfilerCallback10_INTERFACE_DEFINED__
#define __ICorProfilerCallback10_INTERFACE_DEFINED__

/* interface ICorProfilerCallback10 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback10;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("CEC5B60E-C69C-495F-87F6-84D28EE16FFB")
    ICorProfilerCallback10 : public ICorProfilerCallback9
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EventPipeEventDelivered(
            /* [in] */ EVENTPIPE_PROVIDER provider,
            /* [in] */ DWORD eventId,
            /* [in] */ DWORD eventVersion,
            /* [in] */ ULONG cbMetadataBlob,
            /* [size_is][in] */ LPCBYTE metadataBlob,
            /* [in] */ ULONG cbEventData,
            /* [size_is][in] */ LPCBYTE eventData,
            /* [in] */ LPCGUID pActivityId,
            /* [in] */ LPCGUID pRelatedActivityId,
            /* [in] */ ThreadID eventThread,
            /* [in] */ ULONG numStackFrames,
            /* [length_is][in] */ UINT_PTR stackFrames[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeProviderCreated(
            /* [in] */ EVENTPIPE_PROVIDER provider) = 0;

    };
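/* Illustrative sketch -- not part of the MIDL-generated header. Assuming a
 * hypothetical profiler class (say the SampleProfiler sketched earlier, here
 * assumed to derive from ICorProfilerCallback10 instead), an override of the
 * EventPipe delivery callback might filter on payload size before doing any
 * decoding. The metadata/event blob formats are defined elsewhere and are not
 * parsed in this sketch. */
#if 0
HRESULT STDMETHODCALLTYPE SampleProfiler::EventPipeEventDelivered(
    EVENTPIPE_PROVIDER provider, DWORD eventId, DWORD eventVersion,
    ULONG cbMetadataBlob, LPCBYTE metadataBlob,
    ULONG cbEventData, LPCBYTE eventData,
    LPCGUID pActivityId, LPCGUID pRelatedActivityId,
    ThreadID eventThread, ULONG numStackFrames, UINT_PTR stackFrames[])
{
    // Cheap filter first: skip events that carry no payload at all.
    if (cbEventData == 0)
        return S_OK;
    // ... decode metadataBlob / eventData as needed for this provider ...
    return S_OK;
}
#endif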
#else /* C style interface */

    typedef struct ICorProfilerCallback10Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback10 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback10 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback10 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk);
        HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback10 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback10 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback10 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback10 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback10 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback10 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback10 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback10 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback10 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback10 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback10 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback10 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result);
        HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback10 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline);
        HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback10 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback10 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback10 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback10 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback10 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback10 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback10 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback10 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback10 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback10 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback10 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback10 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback10 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback10 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback10 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback10 * This, /* [in] */ ObjectID thrownObjectId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback10 * This);
        HRESULT ( STDMETHODCALLTYPE
*ExceptionSearchFilterEnter )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback10 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback10 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback10 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback10 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback10 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback10 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback10 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback10 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback10 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback10 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback10 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback10 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE 
*ProfilerDetachSucceeded )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback10 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback10 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback10 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback10 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback10 * This, ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *DynamicMethodUnloaded )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *EventPipeEventDelivered )( ICorProfilerCallback10 * This, /* [in] */ EVENTPIPE_PROVIDER provider, /* [in] */ DWORD eventId, /* [in] */ DWORD eventVersion, /* [in] */ ULONG cbMetadataBlob, /* [size_is][in] */ LPCBYTE metadataBlob, /* [in] */ ULONG cbEventData, /* [size_is][in] */ LPCBYTE eventData, /* [in] */ LPCGUID pActivityId, /* [in] */ LPCGUID pRelatedActivityId, /* [in] */ ThreadID eventThread, /* [in] */ ULONG numStackFrames, /* [length_is][in] */ UINT_PTR stackFrames[ ]); HRESULT ( STDMETHODCALLTYPE *EventPipeProviderCreated )( ICorProfilerCallback10 * This, /* [in] */ EVENTPIPE_PROVIDER provider); END_INTERFACE } ICorProfilerCallback10Vtbl; interface ICorProfilerCallback10 { CONST_VTBL struct ICorProfilerCallback10Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback10_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback10_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback10_Release(This) \ ( (This)->lpVtbl 
-> Release(This) ) #define ICorProfilerCallback10_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback10_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback10_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback10_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback10_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback10_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback10_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback10_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback10_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback10_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback10_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback10_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback10_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback10_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback10_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback10_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback10_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback10_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback10_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback10_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback10_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback10_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback10_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback10_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback10_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> 
JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback10_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback10_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback10_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback10_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback10_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback10_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback10_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback10_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback10_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback10_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback10_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback10_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback10_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback10_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback10_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback10_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback10_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback10_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback10_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define 
ICorProfilerCallback10_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback10_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback10_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback10_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback10_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback10_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback10_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback10_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback10_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback10_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback10_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback10_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback10_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback10_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback10_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback10_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback10_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback10_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback10_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define 
ICorProfilerCallback10_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback10_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback10_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback10_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback10_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback10_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback10_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback10_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback10_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback10_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback10_ReJITError(This,moduleId,methodId,functionId,hrStatus) \ ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) ) #define ICorProfilerCallback10_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \ ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) ) #define ICorProfilerCallback10_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \ ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) ) #define ICorProfilerCallback10_ModuleInMemorySymbolsUpdated(This,moduleId) \ ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) ) #define ICorProfilerCallback10_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \ ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) ) #define ICorProfilerCallback10_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback10_DynamicMethodUnloaded(This,functionId) \ ( (This)->lpVtbl -> DynamicMethodUnloaded(This,functionId) ) #define 
ICorProfilerCallback10_EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) \
    ( (This)->lpVtbl -> EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) )

#define ICorProfilerCallback10_EventPipeProviderCreated(This,provider) \
    ( (This)->lpVtbl -> EventPipeProviderCreated(This,provider) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerCallback10_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerCallback11_INTERFACE_DEFINED__
#define __ICorProfilerCallback11_INTERFACE_DEFINED__

/* interface ICorProfilerCallback11 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback11;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("42350846-AAED-47F7-B128-FD0C98881CDE")
    ICorProfilerCallback11 : public ICorProfilerCallback10
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE LoadAsNotificationOnly(
            BOOL *pbNotificationOnly) = 0;

    };

#else /* C style interface */

    typedef struct ICorProfilerCallback11Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerCallback11 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_ void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )(
            ICorProfilerCallback11 * This);

        ULONG ( STDMETHODCALLTYPE *Release )(
            ICorProfilerCallback11 * This);

        HRESULT ( STDMETHODCALLTYPE *Initialize )(
            ICorProfilerCallback11 * This,
            /* [in] */ IUnknown *pICorProfilerInfoUnk);

        HRESULT ( STDMETHODCALLTYPE *Shutdown )(
            ICorProfilerCallback11 * This);

        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )(
            ICorProfilerCallback11 * This,
            /* [in] */ AppDomainID appDomainId);

        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )(
            ICorProfilerCallback11 * This,
            /* [in] */ AppDomainID appDomainId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )(
            ICorProfilerCallback11 * This,
            /* [in] */ AppDomainID appDomainId);

        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )(
            ICorProfilerCallback11 * This,
            /* [in] */ AppDomainID appDomainId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )(
            ICorProfilerCallback11 * This,
            /* [in] */ AssemblyID assemblyId);

        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )(
            ICorProfilerCallback11 * This,
            /* [in] */ AssemblyID assemblyId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )(
            ICorProfilerCallback11 * This,
            /* [in] */ AssemblyID assemblyId);

        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )(
            ICorProfilerCallback11 * This,
            /* [in] */ AssemblyID assemblyId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )(
            ICorProfilerCallback11 * This,
            /* [in] */ ModuleID moduleId);

        HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )(
            ICorProfilerCallback11 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )(
            ICorProfilerCallback11 * This,
            /* [in] */ ModuleID moduleId);

        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )(
            ICorProfilerCallback11 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ HRESULT hrStatus);

        HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )(
            ICorProfilerCallback11 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ AssemblyID AssemblyId);

        HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )(
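// --- Editor's illustrative sketch (not part of the MIDL-generated header) ---
// ICorProfilerCallback11 adds exactly one method over ICorProfilerCallback10:
// LoadAsNotificationOnly, through which the runtime asks whether the profiler
// is willing to load in notification-only mode. A minimal C++ override might
// look like the following; 'NotificationProfiler' is a hypothetical class
// name, and the inherited IUnknown/callback methods are elided:
//
//     class NotificationProfiler : public ICorProfilerCallback11
//     {
//         // ... IUnknown plus the inherited callback methods elided ...
//         HRESULT STDMETHODCALLTYPE LoadAsNotificationOnly(BOOL *pbNotificationOnly) override
//         {
//             if (pbNotificationOnly == nullptr)
//                 return E_POINTER;
//             *pbNotificationOnly = TRUE; // opt in to notification-only loading
//             return S_OK;
//         }
//     };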
ICorProfilerCallback11 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback11 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback11 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback11 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback11 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback11 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback11 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE 
*RuntimeResumeStarted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback11 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback11 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback11 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback11 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback11 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback11 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback11 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback11 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE 
*ThreadNameChanged )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback11 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback11 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback11 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback11 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback11 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback11 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback11 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback11 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback11 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); 
        HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )(
            ICorProfilerCallback11 * This,
            ModuleID moduleId);

        HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )(
            ICorProfilerCallback11 * This,
            /* [in] */ FunctionID functionId,
            /* [in] */ BOOL fIsSafeToBlock,
            /* [in] */ LPCBYTE pILHeader,
            /* [in] */ ULONG cbILHeader);

        HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )(
            ICorProfilerCallback11 * This,
            /* [in] */ FunctionID functionId,
            /* [in] */ HRESULT hrStatus,
            /* [in] */ BOOL fIsSafeToBlock);

        HRESULT ( STDMETHODCALLTYPE *DynamicMethodUnloaded )(
            ICorProfilerCallback11 * This,
            /* [in] */ FunctionID functionId);

        HRESULT ( STDMETHODCALLTYPE *EventPipeEventDelivered )(
            ICorProfilerCallback11 * This,
            /* [in] */ EVENTPIPE_PROVIDER provider,
            /* [in] */ DWORD eventId,
            /* [in] */ DWORD eventVersion,
            /* [in] */ ULONG cbMetadataBlob,
            /* [size_is][in] */ LPCBYTE metadataBlob,
            /* [in] */ ULONG cbEventData,
            /* [size_is][in] */ LPCBYTE eventData,
            /* [in] */ LPCGUID pActivityId,
            /* [in] */ LPCGUID pRelatedActivityId,
            /* [in] */ ThreadID eventThread,
            /* [in] */ ULONG numStackFrames,
            /* [length_is][in] */ UINT_PTR stackFrames[ ]);

        HRESULT ( STDMETHODCALLTYPE *EventPipeProviderCreated )(
            ICorProfilerCallback11 * This,
            /* [in] */ EVENTPIPE_PROVIDER provider);

        HRESULT ( STDMETHODCALLTYPE *LoadAsNotificationOnly )(
            ICorProfilerCallback11 * This,
            BOOL *pbNotificationOnly);

        END_INTERFACE
    } ICorProfilerCallback11Vtbl;

    interface ICorProfilerCallback11
    {
        CONST_VTBL struct ICorProfilerCallback11Vtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerCallback11_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerCallback11_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerCallback11_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerCallback11_Initialize(This,pICorProfilerInfoUnk) \
    ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )

#define ICorProfilerCallback11_Shutdown(This) \
    ( (This)->lpVtbl -> Shutdown(This) )

#define ICorProfilerCallback11_AppDomainCreationStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )

#define ICorProfilerCallback11_AppDomainCreationFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )

#define ICorProfilerCallback11_AppDomainShutdownStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )

#define ICorProfilerCallback11_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )

#define ICorProfilerCallback11_AssemblyLoadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )

#define ICorProfilerCallback11_AssemblyLoadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )

#define ICorProfilerCallback11_AssemblyUnloadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )

#define ICorProfilerCallback11_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )

#define ICorProfilerCallback11_ModuleLoadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )

#define ICorProfilerCallback11_ModuleLoadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )

#define
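// --- Editor's illustrative sketch (not part of the MIDL-generated header) ---
// When COBJMACROS is defined and the C-style interface is in use, the helper
// macros in this block expand to explicit vtable calls through lpVtbl. A
// plain-C caller might shut down and release a callback instance like this
// ('pCallback' is a hypothetical pointer obtained elsewhere, and the header
// name is assumed to be the usual corprof.h):
//
//     #define COBJMACROS
//     #include "corprof.h"
//
//     void ShutdownAndRelease(ICorProfilerCallback11 *pCallback)
//     {
//         /* expands to pCallback->lpVtbl->Shutdown(pCallback) */
//         ICorProfilerCallback11_Shutdown(pCallback);
//         ICorProfilerCallback11_Release(pCallback);
//     }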
ICorProfilerCallback11_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback11_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback11_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback11_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback11_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback11_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback11_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback11_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback11_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback11_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback11_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback11_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback11_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback11_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback11_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback11_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback11_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback11_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback11_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback11_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback11_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback11_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( 
(This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback11_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback11_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback11_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback11_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback11_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback11_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback11_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback11_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback11_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback11_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback11_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback11_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback11_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback11_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback11_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback11_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback11_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback11_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback11_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback11_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define 
ICorProfilerCallback11_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback11_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback11_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback11_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback11_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback11_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback11_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback11_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback11_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback11_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback11_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback11_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback11_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback11_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback11_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback11_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback11_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback11_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback11_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback11_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback11_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define 
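// --- Editor's illustrative sketch (not part of the MIDL-generated header) ---
// The COR_PRF_CODEGEN_FLAGS enum declared further below in this section is
// typically consumed from a GetReJITParameters callback, by passing a flag to
// ICorProfilerFunctionControl::SetCodegenFlags. A minimal override might be
// ('MyProfiler' is a hypothetical callback implementation, not from this
// header):
//
//     HRESULT STDMETHODCALLTYPE MyProfiler::GetReJITParameters(
//         ModuleID moduleId, mdMethodDef methodId,
//         ICorProfilerFunctionControl *pFunctionControl)
//     {
//         // Ask the JIT not to inline the re-jitted method.
//         return pFunctionControl->SetCodegenFlags(COR_PRF_CODEGEN_DISABLE_INLINING);
//     }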
ICorProfilerCallback11_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )

#define ICorProfilerCallback11_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )

#define ICorProfilerCallback11_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )

#define ICorProfilerCallback11_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
    ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )

#define ICorProfilerCallback11_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \
    ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) )

#define ICorProfilerCallback11_ModuleInMemorySymbolsUpdated(This,moduleId) \
    ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) )

#define ICorProfilerCallback11_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) )

#define ICorProfilerCallback11_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )

#define ICorProfilerCallback11_DynamicMethodUnloaded(This,functionId) \
    ( (This)->lpVtbl -> DynamicMethodUnloaded(This,functionId) )

#define ICorProfilerCallback11_EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) \
    ( (This)->lpVtbl -> EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) )

#define ICorProfilerCallback11_EventPipeProviderCreated(This,provider) \
    ( (This)->lpVtbl -> EventPipeProviderCreated(This,provider) )

#define ICorProfilerCallback11_LoadAsNotificationOnly(This,pbNotificationOnly) \
    ( (This)->lpVtbl -> LoadAsNotificationOnly(This,pbNotificationOnly) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerCallback11_INTERFACE_DEFINED__ */

/* interface __MIDL_itf_corprof_0000_0011 */
/* [local] */

typedef /* [public] */
enum __MIDL___MIDL_itf_corprof_0000_0011_0001
    {
        COR_PRF_CODEGEN_DISABLE_INLINING = 0x1,
        COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS = 0x2
    } COR_PRF_CODEGEN_FLAGS;

extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0011_v0_0_c_ifspec;
extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0011_v0_0_s_ifspec;

#ifndef __ICorProfilerInfo_INTERFACE_DEFINED__
#define __ICorProfilerInfo_INTERFACE_DEFINED__

/* interface ICorProfilerInfo */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("28B5557D-3F3F-48b4-90B2-5F9EEA2F6C48")
    ICorProfilerInfo : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE GetClassFromObject(
            /* [in] */ ObjectID objectId,
            /* [out] */ ClassID *pClassId) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetClassFromToken(
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdTypeDef
typeDef, /* [out] */ ClassID *pClassId) = 0; virtual HRESULT STDMETHODCALLTYPE GetCodeInfo( /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize) = 0; virtual HRESULT STDMETHODCALLTYPE GetEventMask( /* [out] */ DWORD *pdwEvents) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionFromIP( /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionFromToken( /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId) = 0; virtual HRESULT STDMETHODCALLTYPE GetHandleFromThread( /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread) = 0; virtual HRESULT STDMETHODCALLTYPE GetObjectSize( /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize) = 0; virtual HRESULT STDMETHODCALLTYPE IsArrayClass( /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank) = 0; virtual HRESULT STDMETHODCALLTYPE GetThreadInfo( /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId) = 0; virtual HRESULT STDMETHODCALLTYPE GetCurrentThreadID( /* [out] */ ThreadID *pThreadId) = 0; virtual HRESULT STDMETHODCALLTYPE GetClassIDInfo( /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionInfo( /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken) = 0; virtual HRESULT STDMETHODCALLTYPE SetEventMask( /* [in] */ DWORD dwEvents) = 0; virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks( /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall) = 0; virtual HRESULT STDMETHODCALLTYPE SetFunctionIDMapper( /* [in] */ FunctionIDMapper *pFunc) = 0; virtual HRESULT STDMETHODCALLTYPE GetTokenAndMetaDataFromFunction( /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken) = 0; virtual HRESULT STDMETHODCALLTYPE GetModuleInfo( /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId) = 0; virtual HRESULT STDMETHODCALLTYPE GetModuleMetaData( /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut) = 0; virtual HRESULT STDMETHODCALLTYPE GetILFunctionBody( /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize) = 0; virtual HRESULT STDMETHODCALLTYPE GetILFunctionBodyAllocator( /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc) = 0; virtual HRESULT STDMETHODCALLTYPE SetILFunctionBody( /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader) = 0; virtual HRESULT STDMETHODCALLTYPE GetAppDomainInfo( /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId) = 0; virtual HRESULT STDMETHODCALLTYPE GetAssemblyInfo( /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID 
*pModuleId) = 0; virtual HRESULT STDMETHODCALLTYPE SetFunctionReJIT( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE ForceGC( void) = 0; virtual HRESULT STDMETHODCALLTYPE SetILInstrumentedCodeMap( /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetInprocInspectionInterface( /* [out] */ IUnknown **ppicd) = 0; virtual HRESULT STDMETHODCALLTYPE GetInprocInspectionIThisThread( /* [out] */ IUnknown **ppicd) = 0; virtual HRESULT STDMETHODCALLTYPE GetThreadContext( /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId) = 0; virtual HRESULT STDMETHODCALLTYPE BeginInprocDebugging( /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext) = 0; virtual HRESULT STDMETHODCALLTYPE EndInprocDebugging( /* [in] */ DWORD dwProfilerContext) = 0; virtual HRESULT STDMETHODCALLTYPE GetILToNativeMapping( /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfoVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo * This, /* [in] 
*/ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); END_INTERFACE } ICorProfilerInfoVtbl; interface ICorProfilerInfo { CONST_VTBL struct 
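// --- Editor's illustrative sketch (not part of the MIDL-generated header) ---
// A profiler usually obtains ICorProfilerInfo (or a later version) inside
// ICorProfilerCallback::Initialize and then selects events with SetEventMask.
// A C++ sketch; 'MyProfiler' and its member 'm_pInfo' are hypothetical names:
//
//     HRESULT STDMETHODCALLTYPE MyProfiler::Initialize(IUnknown *pICorProfilerInfoUnk)
//     {
//         HRESULT hr = pICorProfilerInfoUnk->QueryInterface(
//             IID_ICorProfilerInfo, reinterpret_cast<void **>(&m_pInfo));
//         if (FAILED(hr))
//             return hr;
//         // COR_PRF_MONITOR_JIT_COMPILATION enables the JIT callbacks above.
//         return m_pInfo->SetEventMask(COR_PRF_MONITOR_JIT_COMPILATION);
//     }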
ICorProfilerInfoVtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( 
(This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo2_INTERFACE_DEFINED__ #define __ICorProfilerInfo2_INTERFACE_DEFINED__ /* interface ICorProfilerInfo2 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo2; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("CC0935CD-A518-487d-B0BB-A93214E65478") ICorProfilerInfo2 : public ICorProfilerInfo { public: virtual HRESULT STDMETHODCALLTYPE DoStackSnapshot( /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize) = 0; virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks2( /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionInfo2( /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetStringLayout( /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset) = 0; virtual HRESULT STDMETHODCALLTYPE GetClassLayout( /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize) = 0; virtual HRESULT STDMETHODCALLTYPE GetClassIDInfo2( /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ 
ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetCodeInfo2( /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetClassFromTokenAndTypeArgs( /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionFromTokenAndTypeArgs( /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID) = 0; virtual HRESULT STDMETHODCALLTYPE EnumModuleFrozenObjects( /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum) = 0; virtual HRESULT STDMETHODCALLTYPE GetArrayObjectInfo( /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData) = 0; virtual HRESULT STDMETHODCALLTYPE GetBoxClassLayout( /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset) = 0; virtual HRESULT STDMETHODCALLTYPE GetThreadAppDomain( /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId) = 0; virtual HRESULT STDMETHODCALLTYPE GetRVAStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress) = 0; virtual HRESULT STDMETHODCALLTYPE GetAppDomainStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress) = 0; virtual HRESULT STDMETHODCALLTYPE GetThreadStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress) = 0; virtual HRESULT STDMETHODCALLTYPE GetContextStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress) = 0; virtual HRESULT STDMETHODCALLTYPE GetStaticFieldInfo( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo) = 0; virtual HRESULT STDMETHODCALLTYPE GetGenerationBounds( /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetObjectGeneration( /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range) = 0; virtual HRESULT STDMETHODCALLTYPE GetNotifiedExceptionClauseInfo( /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo2Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo2 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo2 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo2 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( 
ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo2 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo2 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo2 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo2 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo2 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo2 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo2 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* 
[out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo2 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo2 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo2 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo2 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo2 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo2 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo2 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo2 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo2 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo2 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo2 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleID, 
/* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo2 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo2 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); END_INTERFACE } ICorProfilerInfo2Vtbl; interface ICorProfilerInfo2 { CONST_VTBL struct ICorProfilerInfo2Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo2_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo2_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo2_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo2_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo2_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo2_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo2_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> 
GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo2_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo2_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo2_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo2_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo2_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo2_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo2_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo2_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo2_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo2_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo2_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo2_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo2_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo2_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo2_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo2_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo2_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo2_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo2_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo2_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo2_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo2_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo2_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> 
SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo2_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo2_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo2_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo2_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo2_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo2_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo2_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo2_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo2_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo2_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo2_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo2_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo2_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo2_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo2_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo2_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo2_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo2_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo2_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) 
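/* Editorial usage sketch (not part of the generated header): how the C-style
 * COBJMACROS wrappers defined above are typically invoked. The ICorProfilerInfo2
 * pointer is assumed to have been obtained from the runtime in
 * ICorProfilerCallback::Initialize; the helper name below is hypothetical.
 * Guarded with #if 0 so the prebuilt header stays inert. */
#if 0
static HRESULT SampleGetThreadAppDomain(ICorProfilerInfo2 *pInfo2, ThreadID threadId)
{
    AppDomainID appDomainId = 0;
    /* Expands to (pInfo2)->lpVtbl->GetThreadAppDomain(pInfo2, threadId, &appDomainId) */
    HRESULT hr = ICorProfilerInfo2_GetThreadAppDomain(pInfo2, threadId, &appDomainId);
    if (SUCCEEDED(hr))
    {
        /* appDomainId now identifies the app domain the thread is executing in */
    }
    return hr;
}
#endif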
#define ICorProfilerInfo2_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo2_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo2_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo2_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo2_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo2_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo2_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo2_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo2_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo3_INTERFACE_DEFINED__ #define __ICorProfilerInfo3_INTERFACE_DEFINED__ /* interface ICorProfilerInfo3 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo3; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("B555ED4F-452A-4E54-8B39-B5360BAD32A0") ICorProfilerInfo3 : public ICorProfilerInfo2 { public: virtual HRESULT STDMETHODCALLTYPE EnumJITedFunctions( /* [out] */ ICorProfilerFunctionEnum **ppEnum) = 0; virtual HRESULT STDMETHODCALLTYPE RequestProfilerDetach( /* [in] */ DWORD dwExpectedCompletionMilliseconds) = 0; virtual HRESULT STDMETHODCALLTYPE SetFunctionIDMapper2( /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData) = 0; virtual HRESULT STDMETHODCALLTYPE GetStringLayout2( /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset) = 0; virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks3( /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3) = 0; virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks3WithInfo( /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionEnter3Info( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionLeave3Info( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionTailcall3Info( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo) = 0; virtual HRESULT STDMETHODCALLTYPE EnumModules( /* [out] */ ICorProfilerModuleEnum **ppEnum) = 0; virtual HRESULT 
STDMETHODCALLTYPE GetRuntimeInformation( /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetThreadStaticAddress2( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress) = 0; virtual HRESULT STDMETHODCALLTYPE GetAppDomainsContainingModule( /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetModuleInfo2( /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo3Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo3 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo3 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo3 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo3 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo3 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo3 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, 
/* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo3 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo3 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo3 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo3 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo3 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo3 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo3 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo3 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo3 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ 
COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo3 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo3 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo3 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo3 * 
This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo3 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo3 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo3 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo3 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo3 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo3 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo3 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef 
fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); END_INTERFACE } ICorProfilerInfo3Vtbl; interface ICorProfilerInfo3 { CONST_VTBL struct ICorProfilerInfo3Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo3_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo3_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo3_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo3_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo3_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo3_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo3_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo3_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo3_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo3_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo3_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo3_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo3_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo3_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo3_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo3_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo3_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo3_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo3_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo3_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) 
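/* Editorial usage sketch (not part of the generated header): obtaining
 * ICorProfilerInfo3 from the IUnknown the runtime passes to
 * ICorProfilerCallback::Initialize, then calling one of the C-style wrappers
 * defined above. COR_PRF_MONITOR_MODULE_LOADS is one of the standard
 * COR_PRF_MONITOR flags declared elsewhere in this header family; the helper
 * name is hypothetical. Guarded with #if 0 so the prebuilt header stays inert. */
#if 0
static HRESULT SampleInitializeInfo3(IUnknown *pICorProfilerInfoUnk)
{
    ICorProfilerInfo3 *pInfo3 = NULL;
    HRESULT hr = IUnknown_QueryInterface(pICorProfilerInfoUnk, &IID_ICorProfilerInfo3,
                                         (void **)&pInfo3);
    if (FAILED(hr))
        return hr;
    /* Expands to (pInfo3)->lpVtbl->SetEventMask(pInfo3, dwEvents) */
    hr = ICorProfilerInfo3_SetEventMask(pInfo3, COR_PRF_MONITOR_MODULE_LOADS);
    ICorProfilerInfo3_Release(pInfo3);
    return hr;
}
#endif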
#define ICorProfilerInfo3_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo3_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo3_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo3_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo3_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo3_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo3_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo3_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo3_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo3_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo3_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo3_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo3_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo3_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo3_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo3_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo3_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo3_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo3_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo3_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo3_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> 
GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo3_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo3_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo3_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo3_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo3_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo3_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo3_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo3_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo3_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo3_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo3_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo3_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo3_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo3_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo3_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo3_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo3_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo3_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo3_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo3_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define 
ICorProfilerInfo3_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo3_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo3_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo3_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo3_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo3_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo3_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo3_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo3_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo3_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo3_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerObjectEnum_INTERFACE_DEFINED__ #define __ICorProfilerObjectEnum_INTERFACE_DEFINED__ /* interface ICorProfilerObjectEnum */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerObjectEnum; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("2C6269BD-2D13-4321-AE12-6686365FD6AF") ICorProfilerObjectEnum : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0; virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0; virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerObjectEnum **ppEnum) = 0; virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0; virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ObjectID objects[ ], /* [out] */ ULONG *pceltFetched) = 0; }; #else /* C style interface */ typedef struct ICorProfilerObjectEnumVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerObjectEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerObjectEnum * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerObjectEnum * This); HRESULT ( STDMETHODCALLTYPE *Skip 
#ifndef __ICorProfilerObjectEnum_INTERFACE_DEFINED__
#define __ICorProfilerObjectEnum_INTERFACE_DEFINED__

/* interface ICorProfilerObjectEnum */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerObjectEnum;

#if defined(__cplusplus) && !defined(CINTERFACE)
    MIDL_INTERFACE("2C6269BD-2D13-4321-AE12-6686365FD6AF")
    ICorProfilerObjectEnum : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0;
        virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerObjectEnum **ppEnum) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ObjectID objects[ ], /* [out] */ ULONG *pceltFetched) = 0;
    };
#else 	/* C style interface */
    typedef struct ICorProfilerObjectEnumVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerObjectEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerObjectEnum * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerObjectEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Skip )( ICorProfilerObjectEnum * This, /* [in] */ ULONG celt);
        HRESULT ( STDMETHODCALLTYPE *Reset )( ICorProfilerObjectEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Clone )( ICorProfilerObjectEnum * This, /* [out] */ ICorProfilerObjectEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetCount )( ICorProfilerObjectEnum * This, /* [out] */ ULONG *pcelt);
        HRESULT ( STDMETHODCALLTYPE *Next )( ICorProfilerObjectEnum * This, /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ObjectID objects[ ], /* [out] */ ULONG *pceltFetched);
        END_INTERFACE
    } ICorProfilerObjectEnumVtbl;

    interface ICorProfilerObjectEnum
    {
        CONST_VTBL struct ICorProfilerObjectEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS
#define ICorProfilerObjectEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerObjectEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerObjectEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerObjectEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )
#define ICorProfilerObjectEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )
#define ICorProfilerObjectEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )
#define ICorProfilerObjectEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )
#define ICorProfilerObjectEnum_Next(This,celt,objects,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,objects,pceltFetched) )
#endif /* COBJMACROS */
#endif 	/* C style interface */
#endif 	/* __ICorProfilerObjectEnum_INTERFACE_DEFINED__ */
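/* Illustrative sketch -- not part of the generated header.  Every
 * ICorProfiler*Enum interface in this file follows the standard COM
 * enumerator pattern shown here for ICorProfilerObjectEnum; `pEnum` is a
 * hypothetical pointer obtained from an API such as EnumModuleFrozenObjects.
 *
 *     ObjectID ids[64];
 *     ULONG fetched = 0;
 *     while (SUCCEEDED(pEnum->Next(64, ids, &fetched)) && fetched != 0)
 *     {
 *         for (ULONG i = 0; i < fetched; i++)
 *         {
 *             // inspect ids[i]
 *         }
 *     }
 *     pEnum->Release();
 *
 * Next returns S_FALSE (which still satisfies SUCCEEDED) when fewer than the
 * requested number of elements remain, so the loop terminates on fetched == 0.
 */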
#ifndef __ICorProfilerFunctionEnum_INTERFACE_DEFINED__
#define __ICorProfilerFunctionEnum_INTERFACE_DEFINED__

/* interface ICorProfilerFunctionEnum */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerFunctionEnum;

#if defined(__cplusplus) && !defined(CINTERFACE)
    MIDL_INTERFACE("FF71301A-B994-429D-A10B-B345A65280EF")
    ICorProfilerFunctionEnum : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0;
        virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerFunctionEnum **ppEnum) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ COR_PRF_FUNCTION ids[ ], /* [out] */ ULONG *pceltFetched) = 0;
    };
#else 	/* C style interface */
    typedef struct ICorProfilerFunctionEnumVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerFunctionEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerFunctionEnum * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerFunctionEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Skip )( ICorProfilerFunctionEnum * This, /* [in] */ ULONG celt);
        HRESULT ( STDMETHODCALLTYPE *Reset )( ICorProfilerFunctionEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Clone )( ICorProfilerFunctionEnum * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetCount )( ICorProfilerFunctionEnum * This, /* [out] */ ULONG *pcelt);
        HRESULT ( STDMETHODCALLTYPE *Next )( ICorProfilerFunctionEnum * This, /* [in] */ ULONG celt, /* [length_is][size_is][out] */ COR_PRF_FUNCTION ids[ ], /* [out] */ ULONG *pceltFetched);
        END_INTERFACE
    } ICorProfilerFunctionEnumVtbl;

    interface ICorProfilerFunctionEnum
    {
        CONST_VTBL struct ICorProfilerFunctionEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS
#define ICorProfilerFunctionEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerFunctionEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerFunctionEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerFunctionEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )
#define ICorProfilerFunctionEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )
#define ICorProfilerFunctionEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )
#define ICorProfilerFunctionEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )
#define ICorProfilerFunctionEnum_Next(This,celt,ids,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,ids,pceltFetched) )
#endif /* COBJMACROS */
#endif 	/* C style interface */
#endif 	/* __ICorProfilerFunctionEnum_INTERFACE_DEFINED__ */
#ifndef __ICorProfilerModuleEnum_INTERFACE_DEFINED__
#define __ICorProfilerModuleEnum_INTERFACE_DEFINED__

/* interface ICorProfilerModuleEnum */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerModuleEnum;

#if defined(__cplusplus) && !defined(CINTERFACE)
    MIDL_INTERFACE("b0266d75-2081-4493-af7f-028ba34db891")
    ICorProfilerModuleEnum : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0;
        virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerModuleEnum **ppEnum) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ModuleID ids[ ], /* [out] */ ULONG *pceltFetched) = 0;
    };
#else 	/* C style interface */
    typedef struct ICorProfilerModuleEnumVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerModuleEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerModuleEnum * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerModuleEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Skip )( ICorProfilerModuleEnum * This, /* [in] */ ULONG celt);
        HRESULT ( STDMETHODCALLTYPE *Reset )( ICorProfilerModuleEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Clone )( ICorProfilerModuleEnum * This, /* [out] */ ICorProfilerModuleEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetCount )( ICorProfilerModuleEnum * This, /* [out] */ ULONG *pcelt);
        HRESULT ( STDMETHODCALLTYPE *Next )( ICorProfilerModuleEnum * This, /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ModuleID ids[ ], /* [out] */ ULONG *pceltFetched);
        END_INTERFACE
    } ICorProfilerModuleEnumVtbl;

    interface ICorProfilerModuleEnum
    {
        CONST_VTBL struct ICorProfilerModuleEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS
#define ICorProfilerModuleEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerModuleEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerModuleEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerModuleEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )
#define ICorProfilerModuleEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )
#define ICorProfilerModuleEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )
#define ICorProfilerModuleEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )
#define ICorProfilerModuleEnum_Next(This,celt,ids,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,ids,pceltFetched) )
#endif /* COBJMACROS */
#endif 	/* C style interface */
#endif 	/* __ICorProfilerModuleEnum_INTERFACE_DEFINED__ */

#ifndef __IMethodMalloc_INTERFACE_DEFINED__
#define __IMethodMalloc_INTERFACE_DEFINED__

/* interface IMethodMalloc */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_IMethodMalloc;

#if defined(__cplusplus) && !defined(CINTERFACE)
    MIDL_INTERFACE("A0EFB28B-6EE2-4d7b-B983-A75EF7BEEDB8")
    IMethodMalloc : public IUnknown
    {
    public:
        virtual PVOID STDMETHODCALLTYPE Alloc( /* [in] */ ULONG cb) = 0;
    };
#else 	/* C style interface */
    typedef struct IMethodMallocVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( IMethodMalloc * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( IMethodMalloc * This);
        ULONG ( STDMETHODCALLTYPE *Release )( IMethodMalloc * This);
        PVOID ( STDMETHODCALLTYPE *Alloc )( IMethodMalloc * This, /* [in] */ ULONG cb);
        END_INTERFACE
    } IMethodMallocVtbl;

    interface IMethodMalloc
    {
        CONST_VTBL struct IMethodMallocVtbl *lpVtbl;
    };

#ifdef COBJMACROS
#define IMethodMalloc_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define IMethodMalloc_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define IMethodMalloc_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define IMethodMalloc_Alloc(This,cb) \
    ( (This)->lpVtbl -> Alloc(This,cb) )
#endif /* COBJMACROS */
#endif 	/* C style interface */
#endif 	/* __IMethodMalloc_INTERFACE_DEFINED__ */
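/* Illustrative sketch -- not part of the generated header.  IMethodMalloc is
 * the allocator handed out by ICorProfilerInfo::GetILFunctionBodyAllocator;
 * a new IL body passed to ICorProfilerInfo::SetILFunctionBody is expected to
 * come from this allocator.  `pInfo`, `moduleId`, `methodDef`, `rewrittenIL`
 * and `cbRewrittenIL` are hypothetical names, and error handling is elided.
 *
 *     IMethodMalloc *pMalloc = NULL;
 *     if (SUCCEEDED(pInfo->GetILFunctionBodyAllocator(moduleId, &pMalloc)))
 *     {
 *         BYTE *pNewBody = (BYTE *)pMalloc->Alloc(cbRewrittenIL);
 *         memcpy(pNewBody, rewrittenIL, cbRewrittenIL);  // assumes Alloc succeeded
 *         pInfo->SetILFunctionBody(moduleId, methodDef, (LPCBYTE)pNewBody);
 *         pMalloc->Release();  // the IL buffer itself stays owned by the runtime
 *     }
 */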
#ifndef __ICorProfilerFunctionControl_INTERFACE_DEFINED__
#define __ICorProfilerFunctionControl_INTERFACE_DEFINED__

/* interface ICorProfilerFunctionControl */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerFunctionControl;

#if defined(__cplusplus) && !defined(CINTERFACE)
    MIDL_INTERFACE("F0963021-E1EA-4732-8581-E01B0BD3C0C6")
    ICorProfilerFunctionControl : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE SetCodegenFlags( /* [in] */ DWORD flags) = 0;
        virtual HRESULT STDMETHODCALLTYPE SetILFunctionBody( /* [in] */ ULONG cbNewILMethodHeader, /* [size_is][in] */ LPCBYTE pbNewILMethodHeader) = 0;
        virtual HRESULT STDMETHODCALLTYPE SetILInstrumentedCodeMap( /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]) = 0;
    };
#else 	/* C style interface */
    typedef struct ICorProfilerFunctionControlVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerFunctionControl * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerFunctionControl * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerFunctionControl * This);
        HRESULT ( STDMETHODCALLTYPE *SetCodegenFlags )( ICorProfilerFunctionControl * This, /* [in] */ DWORD flags);
        HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerFunctionControl * This, /* [in] */ ULONG cbNewILMethodHeader, /* [size_is][in] */ LPCBYTE pbNewILMethodHeader);
        HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerFunctionControl * This, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);
        END_INTERFACE
    } ICorProfilerFunctionControlVtbl;

    interface ICorProfilerFunctionControl
    {
        CONST_VTBL struct ICorProfilerFunctionControlVtbl *lpVtbl;
    };

#ifdef COBJMACROS
#define ICorProfilerFunctionControl_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerFunctionControl_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerFunctionControl_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerFunctionControl_SetCodegenFlags(This,flags) \
    ( (This)->lpVtbl -> SetCodegenFlags(This,flags) )
#define ICorProfilerFunctionControl_SetILFunctionBody(This,cbNewILMethodHeader,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,cbNewILMethodHeader,pbNewILMethodHeader) )
#define ICorProfilerFunctionControl_SetILInstrumentedCodeMap(This,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,cILMapEntries,rgILMapEntries) )
#endif /* COBJMACROS */
#endif 	/* C style interface */
#endif 	/* __ICorProfilerFunctionControl_INTERFACE_DEFINED__ */
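/* Illustrative sketch -- not part of the generated header.
 * ICorProfilerFunctionControl is handed to the profiler's GetReJITParameters
 * callback (declared elsewhere in this file) so the instrumented body for one
 * ReJIT request can be supplied.  Unlike ICorProfilerInfo::SetILFunctionBody,
 * the IL buffer here may live in profiler-owned memory.  All names below are
 * hypothetical.
 *
 *     HRESULT MyProfiler::GetReJITParameters(ModuleID moduleId,
 *                                            mdMethodDef methodId,
 *                                            ICorProfilerFunctionControl *pCtl)
 *     {
 *         return pCtl->SetILFunctionBody(cbInstrumentedIL, pInstrumentedIL);
 *     }
 */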
#ifndef __ICorProfilerInfo4_INTERFACE_DEFINED__
#define __ICorProfilerInfo4_INTERFACE_DEFINED__

/* interface ICorProfilerInfo4 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerInfo4;

#if defined(__cplusplus) && !defined(CINTERFACE)
    MIDL_INTERFACE("0d8fdcaa-6257-47bf-b1bf-94dac88466ee")
    ICorProfilerInfo4 : public ICorProfilerInfo3
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EnumThreads( /* [out] */ ICorProfilerThreadEnum **ppEnum) = 0;
        virtual HRESULT STDMETHODCALLTYPE InitializeCurrentThread( void) = 0;
        virtual HRESULT STDMETHODCALLTYPE RequestReJIT( /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]) = 0;
        virtual HRESULT STDMETHODCALLTYPE RequestRevert( /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetCodeInfo3( /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetFunctionFromIP2( /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetReJITIDs( /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetILToNativeMapping2( /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]) = 0;
        virtual HRESULT STDMETHODCALLTYPE EnumJITedFunctions2( /* [out] */ ICorProfilerFunctionEnum **ppEnum) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetObjectSize2( /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize) = 0;
    };
#else 	/* C style interface */
    typedef struct ICorProfilerInfo4Vtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo4 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo4 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo4 * This);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo4 * This, /* [out] */ DWORD *pdwEvents);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo4 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread);
        HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank);
        HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo4 * This, /* [out] */ ThreadID *pThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken);
        HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo4 * This, /* [in] */ DWORD dwEvents);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo4 * This, /* [in] */ FunctionIDMapper *pFunc);
        HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken);
        HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId);
        HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut);
        HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize);
        HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc);
        HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader);
        HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo4 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId);
        HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo4 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo4 * This);
        HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo4 * This, /* [out] */ IUnknown **ppicd);
        HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo4 * This, /* [out] */ IUnknown **ppicd);
        HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId);
        HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo4 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext);
        HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo4 * This, /* [in] */ DWORD dwProfilerContext);
        HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
        HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo4 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo4 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset);
        HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo4 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize);
        HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID);
        HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData);
        HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset);
        HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId);
        HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo);
        HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo4 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range);
        HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo4 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo);
        HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo4 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData);
        HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo4 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo);
        HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo4 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags);
        HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo4 * This);
        HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo4 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo4 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo4 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId);
        HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
        HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize);
        END_INTERFACE
    } ICorProfilerInfo4Vtbl;

    interface ICorProfilerInfo4
    {
        CONST_VTBL struct ICorProfilerInfo4Vtbl *lpVtbl;
    };

#ifdef COBJMACROS
#define ICorProfilerInfo4_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerInfo4_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerInfo4_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerInfo4_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
#define ICorProfilerInfo4_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
#define ICorProfilerInfo4_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
#define ICorProfilerInfo4_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
#define ICorProfilerInfo4_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
#define ICorProfilerInfo4_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
#define ICorProfilerInfo4_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
#define ICorProfilerInfo4_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
#define ICorProfilerInfo4_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
#define ICorProfilerInfo4_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
#define ICorProfilerInfo4_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
#define ICorProfilerInfo4_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
#define ICorProfilerInfo4_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
#define ICorProfilerInfo4_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
#define ICorProfilerInfo4_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo4_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
#define ICorProfilerInfo4_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
#define ICorProfilerInfo4_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
    ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )
#define ICorProfilerInfo4_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
    ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) )
#define ICorProfilerInfo4_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \
    ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) )
#define ICorProfilerInfo4_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \
    ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) )
#define ICorProfilerInfo4_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) )
#define ICorProfilerInfo4_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \
    ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) )
#define ICorProfilerInfo4_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \
    ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) )
#define ICorProfilerInfo4_SetFunctionReJIT(This,functionId) \
    ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) )
#define ICorProfilerInfo4_ForceGC(This) \
    ( (This)->lpVtbl -> ForceGC(This) )
#define ICorProfilerInfo4_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) )
#define ICorProfilerInfo4_GetInprocInspectionInterface(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) )
#define ICorProfilerInfo4_GetInprocInspectionIThisThread(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) )
#define ICorProfilerInfo4_GetThreadContext(This,threadId,pContextId) \
    ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) )
#define ICorProfilerInfo4_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \
    ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) )
#define ICorProfilerInfo4_EndInprocDebugging(This,dwProfilerContext) \
    ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) )
#define ICorProfilerInfo4_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) )
#define ICorProfilerInfo4_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \
    ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) )
#define ICorProfilerInfo4_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo4_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) )
#define ICorProfilerInfo4_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) )
#define ICorProfilerInfo4_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \
    ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) )
#define ICorProfilerInfo4_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) )
#define ICorProfilerInfo4_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) )
#define ICorProfilerInfo4_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \
    ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) )
#define ICorProfilerInfo4_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \
    ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) )
#define ICorProfilerInfo4_EnumModuleFrozenObjects(This,moduleID,ppEnum) \
    ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) )
#define ICorProfilerInfo4_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \
    ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) )
#define ICorProfilerInfo4_GetBoxClassLayout(This,classId,pBufferOffset) \
    ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) )
#define ICorProfilerInfo4_GetThreadAppDomain(This,threadId,pAppDomainId) \
    ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) )
#define ICorProfilerInfo4_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \
    ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) )
#define ICorProfilerInfo4_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \
    ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) )
#define ICorProfilerInfo4_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \
    ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) )
#define ICorProfilerInfo4_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \
    ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) )
#define ICorProfilerInfo4_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \
    ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) )
#define ICorProfilerInfo4_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \
    ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) )
#define ICorProfilerInfo4_GetObjectGeneration(This,objectId,range) \
    ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) )
#define ICorProfilerInfo4_GetNotifiedExceptionClauseInfo(This,pinfo) \
    ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) )
#define ICorProfilerInfo4_EnumJITedFunctions(This,ppEnum) \
    ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) )
#define ICorProfilerInfo4_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \
    ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) )
#define ICorProfilerInfo4_SetFunctionIDMapper2(This,pFunc,clientData) \
    ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) )
#define ICorProfilerInfo4_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) )
#define ICorProfilerInfo4_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) )
#define ICorProfilerInfo4_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) )
#define ICorProfilerInfo4_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \
    ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) )
#define ICorProfilerInfo4_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \
    ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) )
#define ICorProfilerInfo4_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \
    ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) )
#define ICorProfilerInfo4_EnumModules(This,ppEnum) \
    ( (This)->lpVtbl -> EnumModules(This,ppEnum) )
#define ICorProfilerInfo4_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \
    ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) )
#define ICorProfilerInfo4_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \
    ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) )
#define ICorProfilerInfo4_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \
    ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) )
#define ICorProfilerInfo4_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \
    ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) )
#define ICorProfilerInfo4_EnumThreads(This,ppEnum) \
    ( (This)->lpVtbl -> EnumThreads(This,ppEnum) )
#define ICorProfilerInfo4_InitializeCurrentThread(This) \
    ( (This)->lpVtbl -> InitializeCurrentThread(This) )
#define ICorProfilerInfo4_RequestReJIT(This,cFunctions,moduleIds,methodIds) \
    ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) )
#define ICorProfilerInfo4_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \
    ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) )
#define ICorProfilerInfo4_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) )
#define ICorProfilerInfo4_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \
    ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) )
#define ICorProfilerInfo4_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \
    ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) )
#define ICorProfilerInfo4_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) )
#define ICorProfilerInfo4_EnumJITedFunctions2(This,ppEnum) \
    ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) )
#define ICorProfilerInfo4_GetObjectSize2(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) )
#endif /* COBJMACROS */
#endif 	/* C style interface */
#endif 	/* __ICorProfilerInfo4_INTERFACE_DEFINED__ */
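/* Illustrative sketch -- not part of the generated header.  The central
 * addition in ICorProfilerInfo4 above is ReJIT.  A minimal request for a
 * single method looks like this; `pInfo4`, `moduleId` and `methodDef` are
 * hypothetical, and unmanaged threads should call InitializeCurrentThread
 * before using these APIs.
 *
 *     ModuleID    modules[1] = { moduleId };
 *     mdMethodDef methods[1] = { methodDef };
 *     HRESULT hr = pInfo4->RequestReJIT(1, modules, methods);
 *     // The new body is supplied later through the GetReJITParameters
 *     // callback and ICorProfilerFunctionControl; RequestRevert undoes it.
 */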
#ifndef __ICorProfilerInfo5_INTERFACE_DEFINED__
#define __ICorProfilerInfo5_INTERFACE_DEFINED__

/* interface ICorProfilerInfo5 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerInfo5;

#if defined(__cplusplus) && !defined(CINTERFACE)
    MIDL_INTERFACE("07602928-CE38-4B83-81E7-74ADAF781214")
    ICorProfilerInfo5 : public ICorProfilerInfo4
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE GetEventMask2( /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh) = 0;
        virtual HRESULT STDMETHODCALLTYPE SetEventMask2( /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh) = 0;
    };
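/* Illustrative sketch -- not part of the generated header.  ICorProfilerInfo5
 * widens the event mask to two DWORDs.  The usual read-modify-write pattern,
 * with a hypothetical `pInfo5` (the high mask takes flags from the
 * COR_PRF_HIGH_MONITOR enumeration):
 *
 *     DWORD low = 0, high = 0;
 *     if (SUCCEEDED(pInfo5->GetEventMask2(&low, &high)))
 *     {
 *         pInfo5->SetEventMask2(low | COR_PRF_MONITOR_MODULE_LOADS, high);
 *     }
 */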
#else 	/* C style interface */
    typedef struct ICorProfilerInfo5Vtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo5 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo5 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo5 * This);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo5 * This, /* [out] */ DWORD *pdwEvents);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo5 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo5 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread);
        HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank);
        HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo5 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo5 * This, /* [out] */ ThreadID *pThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken);
        HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo5 * This, /* [in] */ DWORD dwEvents);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo5 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo5 * This, /* [in] */ FunctionIDMapper *pFunc);
        HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken);
        HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId);
        HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut);
        HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize);
        HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc);
        HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader);
        HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo5 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId);
        HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo5 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo5 * This);
        HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo5 * This, /* [out] */ IUnknown **ppicd);
        HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo5 * This, /* [out] */ IUnknown **ppicd);
        HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo5 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId);
        HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo5 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext);
        HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo5 * This, /* [in] */ DWORD dwProfilerContext);
        HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
        HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo5 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo5 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset);
        HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo5 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize);
        HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID);
        HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData);
        HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset);
        HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo5 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId);
        HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo);
        HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo5 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range);
        HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo5 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo);
        HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo5 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData);
        HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo5 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo5 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo5 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo);
        HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo5 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress);
        HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags);
        HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo5 * This);
        HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo5 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo5 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo5 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId);
        HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
        HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize);
        HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo5 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh);
        HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo5 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh);
        END_INTERFACE
    } ICorProfilerInfo5Vtbl;

    interface ICorProfilerInfo5
    {
        CONST_VTBL struct ICorProfilerInfo5Vtbl *lpVtbl;
    };

#ifdef COBJMACROS
#define ICorProfilerInfo5_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerInfo5_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerInfo5_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerInfo5_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
#define ICorProfilerInfo5_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
#define ICorProfilerInfo5_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
#define ICorProfilerInfo5_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
#define ICorProfilerInfo5_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
#define ICorProfilerInfo5_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
#define ICorProfilerInfo5_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
#define ICorProfilerInfo5_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
#define ICorProfilerInfo5_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
#define ICorProfilerInfo5_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
#define ICorProfilerInfo5_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
#define ICorProfilerInfo5_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
#define ICorProfilerInfo5_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
#define ICorProfilerInfo5_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
#define ICorProfilerInfo5_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo5_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
#define ICorProfilerInfo5_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
#define ICorProfilerInfo5_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
    ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )
#define ICorProfilerInfo5_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
    ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) )
#define ICorProfilerInfo5_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \
    ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) )
#define ICorProfilerInfo5_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \
    ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) )
#define ICorProfilerInfo5_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) )
#define ICorProfilerInfo5_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \
    ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) )
#define ICorProfilerInfo5_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \
    ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) )
#define ICorProfilerInfo5_SetFunctionReJIT(This,functionId) \
    ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) )
#define ICorProfilerInfo5_ForceGC(This) \
    ( (This)->lpVtbl -> ForceGC(This) )
#define ICorProfilerInfo5_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) )
#define ICorProfilerInfo5_GetInprocInspectionInterface(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) )
#define ICorProfilerInfo5_GetInprocInspectionIThisThread(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) )
#define ICorProfilerInfo5_GetThreadContext(This,threadId,pContextId) \
    ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) )
#define ICorProfilerInfo5_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \
    ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) )
#define ICorProfilerInfo5_EndInprocDebugging(This,dwProfilerContext) \
    ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) )
#define ICorProfilerInfo5_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) )
#define ICorProfilerInfo5_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \
    ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) )
#define ICorProfilerInfo5_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo5_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) )
#define ICorProfilerInfo5_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) )
#define ICorProfilerInfo5_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \
    ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) )
#define ICorProfilerInfo5_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) )
#define ICorProfilerInfo5_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) )
#define ICorProfilerInfo5_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \
    ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) )
#define ICorProfilerInfo5_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \
    ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) )
#define ICorProfilerInfo5_EnumModuleFrozenObjects(This,moduleID,ppEnum) \
    ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) )
#define ICorProfilerInfo5_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \
    ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) )
#define ICorProfilerInfo5_GetBoxClassLayout(This,classId,pBufferOffset) \
    ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) )
#define ICorProfilerInfo5_GetThreadAppDomain(This,threadId,pAppDomainId) \
    ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) )
#define ICorProfilerInfo5_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \
    ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) )
#define ICorProfilerInfo5_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \
    ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) )
#define
ICorProfilerInfo5_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo5_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo5_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo5_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo5_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo5_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo5_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo5_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo5_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo5_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo5_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo5_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo5_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo5_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo5_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo5_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo5_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo5_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo5_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define 
ICorProfilerInfo5_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo5_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo5_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo5_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo5_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo5_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo5_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo5_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo5_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo5_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo5_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo5_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo5_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo5_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo6_INTERFACE_DEFINED__ #define __ICorProfilerInfo6_INTERFACE_DEFINED__ /* interface ICorProfilerInfo6 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo6; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("F30A070D-BFFB-46A7-B1D8-8781EF7B698A") ICorProfilerInfo6 : public ICorProfilerInfo5 { public: virtual HRESULT STDMETHODCALLTYPE EnumNgenModuleMethodsInliningThisMethod( /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo6Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo6 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo6 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo6 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ 
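/*
 * Each ICorProfilerInfoN vtbl in these C-style interface blocks repeats
 * every inherited slot in declaration order, so the struct layout matches
 * the corresponding C++ vtable exactly. With COBJMACROS defined, C callers
 * use the forwarding macros; an illustrative sketch (pInfo is assumed to be
 * an ICorProfilerInfo5* obtained via QueryInterface in
 * ICorProfilerCallback::Initialize):
 *
 *   DWORD lo = COR_PRF_MONITOR_MODULE_LOADS, hi = 0;
 *   HRESULT hr = ICorProfilerInfo5_SetEventMask2(pInfo, lo, hi);
 *   // expands to: pInfo->lpVtbl->SetEventMask2(pInfo, lo, hi)
 */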
ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo6 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo6 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo6 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo6 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo6 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo6 * 
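/*
 * GetModuleInfo, GetAppDomainInfo, and GetAssemblyInfo (continuing below)
 * share the usual two-call buffer pattern: call with cchName == 0 to learn
 * the required length via *pcchName, then call again with a buffer of that
 * size. A sketch, assuming NULL is accepted for out parameters that are not
 * needed and that pInfo6 and moduleId come from profiler callbacks:
 *
 *   ULONG cch = 0;
 *   ICorProfilerInfo6_GetModuleInfo(pInfo6, moduleId, NULL, 0, &cch,
 *                                   NULL, NULL);
 *   WCHAR *name = (WCHAR *)malloc(cch * sizeof(WCHAR));
 *   ICorProfilerInfo6_GetModuleInfo(pInfo6, moduleId, NULL, cch, &cch,
 *                                   name, NULL);
 */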
This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo6 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo6 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo6 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo6 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo6 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo6 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo6 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], 
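/*
 * GetClassFromTokenAndTypeArgs and GetFunctionFromTokenAndTypeArgs resolve
 * an open generic token plus concrete ClassID type arguments to the
 * instantiated ClassID or FunctionID. A hedged sketch for an instantiation
 * with one type argument (int32ClassId and listTypeDef are hypothetical
 * values the profiler has looked up beforehand):
 *
 *   ClassID args[1] = { int32ClassId };
 *   ClassID instantiated = 0;
 *   ICorProfilerInfo6_GetClassFromTokenAndTypeArgs(pInfo6, moduleId,
 *       listTypeDef, 1, args, &instantiated);
 */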
/* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo6 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo6 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo6 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo 
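/*
 * The *3WithInfo enter/leave/tailcall hooks receive an opaque
 * COR_PRF_ELT_INFO instead of inline argument data; the profiler decodes it
 * from inside the hook via GetFunctionEnter3Info and friends. A sketch of
 * the size-probe idiom (g_pInfo is a hypothetical global
 * ICorProfilerInfo6*; a real hook also needs register-preserving assembly
 * glue, omitted here):
 *
 *   void MyEnter3WithInfo(FunctionIDOrClientID fid, COR_PRF_ELT_INFO elt)
 *   {
 *       COR_PRF_FRAME_INFO frame; ULONG cb = 0;
 *       g_pInfo->lpVtbl->GetFunctionEnter3Info(g_pInfo, fid.functionID,
 *           elt, &frame, &cb, NULL);  // probe: cb receives the needed size
 *   }
 */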
*pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo6 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo6 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo6 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo6 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo6 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( 
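/*
 * The ReJIT family declared above (RequestReJIT, RequestRevert,
 * GetReJITIDs) and GetILToNativeMapping2, continuing below, key everything
 * on (ModuleID, mdMethodDef) pairs plus a ReJITID. A minimal sketch of
 * requesting re-JIT compilation of one method (moduleId and methodDef are
 * hypothetical):
 *
 *   ModuleID    mods[1]   = { moduleId };
 *   mdMethodDef tokens[1] = { methodDef };
 *   HRESULT hr = ICorProfilerInfo6_RequestReJIT(pInfo6, 1, mods, tokens);
 *   // On S_OK, ReJITCompilationStarted callbacks follow asynchronously.
 */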
ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo6 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo6 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); END_INTERFACE } ICorProfilerInfo6Vtbl; interface ICorProfilerInfo6 { CONST_VTBL struct ICorProfilerInfo6Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo6_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo6_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo6_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo6_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo6_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo6_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo6_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo6_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo6_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo6_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo6_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo6_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo6_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo6_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo6_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo6_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo6_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> 
SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo6_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo6_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo6_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo6_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo6_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo6_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo6_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo6_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo6_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo6_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo6_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo6_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo6_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo6_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo6_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo6_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo6_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo6_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo6_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo6_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> 
GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo6_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo6_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo6_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo6_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo6_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo6_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo6_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo6_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo6_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo6_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo6_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo6_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo6_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo6_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo6_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo6_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo6_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo6_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo6_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define 
ICorProfilerInfo6_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo6_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo6_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo6_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo6_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo6_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo6_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo6_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo6_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo6_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo6_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo6_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo6_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo6_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo6_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo6_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo6_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define 
ICorProfilerInfo6_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo6_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo6_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo6_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo6_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo6_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo6_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo6_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo7_INTERFACE_DEFINED__ #define __ICorProfilerInfo7_INTERFACE_DEFINED__ /* interface ICorProfilerInfo7 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo7; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("9AEECC0D-63E0-4187-8C00-E312F503F663") ICorProfilerInfo7 : public ICorProfilerInfo6 { public: virtual HRESULT STDMETHODCALLTYPE ApplyMetaData( /* [in] */ ModuleID moduleId) = 0; virtual HRESULT STDMETHODCALLTYPE GetInMemorySymbolsLength( /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes) = 0; virtual HRESULT STDMETHODCALLTYPE ReadInMemorySymbols( /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo7Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo7 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo7 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo7 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo7 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo7 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo7 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo7 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo7 * This, /* [in] */ 
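/*
 * ICorProfilerInfo7, declared above, adds metadata and symbol support:
 * ApplyMetaData re-applies metadata changes made through IMetaDataEmit, and
 * GetInMemorySymbolsLength/ReadInMemorySymbols retrieve the in-memory PDB
 * bytes of a dynamic module. A hedged sketch using the same two-call
 * pattern (pInfo7 and moduleId assumed from callbacks; error handling
 * elided):
 *
 *   DWORD cb = 0, read = 0;
 *   ICorProfilerInfo7_GetInMemorySymbolsLength(pInfo7, moduleId, &cb);
 *   BYTE *buf = (BYTE *)malloc(cb);
 *   ICorProfilerInfo7_ReadInMemorySymbols(pInfo7, moduleId, 0, buf,
 *                                         cb, &read);
 */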
ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo7 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo7 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo7 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo7 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo7 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo7 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo7 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); 
HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo7 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo7 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo7 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo7 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo7 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo7 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo7 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo7 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo7 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 
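/*
 * GetArrayObjectInfo, whose parameter list continues below, fills one size
 * and one lower bound per dimension and returns a pointer into the raw
 * element storage. Illustrative sketch for a rank-1 array (cDimensions
 * must match the rank reported by IsArrayClass; objectId is hypothetical):
 *
 *   ULONG32 size; int lower; BYTE *data;
 *   ICorProfilerInfo7_GetArrayObjectInfo(pInfo7, objectId, 1,
 *                                        &size, &lower, &data);
 */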
pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo7 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo7 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo7 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo7 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo7 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo7 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo7 * This, /* 
[in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo7 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo7 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo7 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo7 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo7 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo7 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo7 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo7 * This, /* 
[in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo7 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); END_INTERFACE } ICorProfilerInfo7Vtbl; interface ICorProfilerInfo7 { CONST_VTBL struct ICorProfilerInfo7Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo7_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo7_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo7_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo7_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo7_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo7_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo7_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo7_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo7_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo7_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo7_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo7_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo7_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo7_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo7_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo7_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo7_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo7_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define 
ICorProfilerInfo7_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo7_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo7_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo7_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo7_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo7_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo7_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo7_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo7_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo7_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo7_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo7_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo7_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo7_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo7_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo7_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo7_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo7_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo7_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo7_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> 
GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo7_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo7_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo7_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo7_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo7_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo7_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo7_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo7_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo7_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo7_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo7_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo7_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo7_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo7_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo7_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo7_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo7_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo7_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo7_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo7_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> 
SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo7_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo7_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo7_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo7_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo7_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo7_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo7_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo7_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo7_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo7_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo7_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo7_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo7_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo7_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo7_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo7_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo7_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> 
GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo7_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo7_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo7_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo7_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo7_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo7_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo7_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo7_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo7_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo8_INTERFACE_DEFINED__ #define __ICorProfilerInfo8_INTERFACE_DEFINED__ /* interface ICorProfilerInfo8 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo8; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("C5AC80A6-782E-4716-8044-39598C60CFBF") ICorProfilerInfo8 : public ICorProfilerInfo7 { public: virtual HRESULT STDMETHODCALLTYPE IsFunctionDynamic( /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionFromIP3( /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId) = 0; virtual HRESULT STDMETHODCALLTYPE GetDynamicFunctionInfo( /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo8Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo8 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo8 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo8 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo8 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo8 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE 
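/* This C-style binding mirrors the C++ vtable: each member is a function
   pointer whose first parameter is the interface pointer itself. A minimal
   hedged sketch of a direct call, assuming pInfo is a valid ICorProfilerInfo8
   pointer obtained through QueryInterface:

       DWORD dwEvents = 0;
       HRESULT hr = pInfo->lpVtbl->GetEventMask(pInfo, &dwEvents);

   With COBJMACROS defined, the macro form
   ICorProfilerInfo8_GetEventMask(pInfo, &dwEvents) expands to the same call. */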
*GetFunctionFromToken )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo8 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo8 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo8 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo8 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE 
*SetFunctionReJIT )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo8 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo8 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo8 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo8 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo8 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo8 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo8 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID 
typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo8 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo8 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo8 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo8 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ 
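/* GetFunctionEnter3Info reports pcbArgumentInfo as in/out, so the usual
   two-call sizing idiom applies. A hedged sketch, assuming the call is made
   from inside an enter hook with argument tracing enabled ("info" stands for
   the profiler info pointer; error handling omitted):

       ULONG cb = 0;
       COR_PRF_FRAME_INFO frame = 0;
       info->lpVtbl->GetFunctionEnter3Info(info, funcId, eltInfo, &frame, &cb, NULL);
       COR_PRF_FUNCTION_ARGUMENT_INFO *args =
           (COR_PRF_FUNCTION_ARGUMENT_INFO *)malloc(cb);
       info->lpVtbl->GetFunctionEnter3Info(info, funcId, eltInfo, &frame, &cb, args);
 */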
COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo8 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo8 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo8 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo8 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo8 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo8 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo8 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo8 * This, /* 
[out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo8 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo8 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo8 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); END_INTERFACE } ICorProfilerInfo8Vtbl; interface ICorProfilerInfo8 { CONST_VTBL struct ICorProfilerInfo8Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo8_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo8_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo8_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo8_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo8_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo8_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo8_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo8_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo8_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo8_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo8_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo8_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define 
ICorProfilerInfo8_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo8_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo8_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo8_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo8_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo8_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo8_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo8_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo8_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo8_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo8_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo8_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo8_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo8_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo8_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo8_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo8_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo8_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo8_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo8_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo8_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo8_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> 
EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo8_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo8_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo8_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo8_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo8_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo8_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo8_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo8_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo8_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo8_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo8_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo8_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo8_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo8_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo8_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo8_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo8_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define 
ICorProfilerInfo8_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo8_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo8_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo8_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo8_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo8_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo8_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo8_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo8_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo8_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo8_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo8_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo8_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo8_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo8_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo8_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo8_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo8_InitializeCurrentThread(This) \ ( (This)->lpVtbl 
-> InitializeCurrentThread(This) ) #define ICorProfilerInfo8_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo8_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo8_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo8_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo8_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo8_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo8_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo8_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo8_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo8_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo8_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo8_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo8_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo8_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #define ICorProfilerInfo8_IsFunctionDynamic(This,functionId,isDynamic) \ ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) ) #define ICorProfilerInfo8_GetFunctionFromIP3(This,ip,functionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) ) #define ICorProfilerInfo8_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \ ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo8_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo9_INTERFACE_DEFINED__ #define __ICorProfilerInfo9_INTERFACE_DEFINED__ /* interface ICorProfilerInfo9 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo9; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("008170DB-F8CC-4796-9A51-DC8AA0B47012") ICorProfilerInfo9 : public ICorProfilerInfo8 { public: virtual HRESULT STDMETHODCALLTYPE GetNativeCodeStartAddresses( FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]) 
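/* The three ICorProfilerInfo9 methods declared here take a native code start
   address instead of a FunctionID because, with tiered compilation and ReJIT,
   one FunctionID can have several native bodies: GetNativeCodeStartAddresses
   enumerates those addresses, and GetILToNativeMapping3 and GetCodeInfo4 then
   accept one of the addresses directly. */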
= 0; virtual HRESULT STDMETHODCALLTYPE GetILToNativeMapping3( UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetCodeInfo4( UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo9Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo9 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo9 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo9 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo9 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo9 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo9 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo9 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ 
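/* The _Out_writes_to_(cchName, *pcchName) annotation below encodes the usual
   sizing convention for the name-returning methods in this header: call once
   with cchName == 0 to receive the required length in *pcchName, then
   allocate and call again. A hedged C sketch for GetModuleInfo, assuming NULL
   is accepted for outputs the caller does not need:

       ULONG cch = 0;
       info->lpVtbl->GetModuleInfo(info, moduleId, NULL, 0, &cch, NULL, NULL);
       WCHAR *name = (WCHAR *)malloc(cch * sizeof(WCHAR));
       info->lpVtbl->GetModuleInfo(info, moduleId, NULL, cch, &cch, name, NULL);
 */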
_Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo9 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo9 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo9 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo9 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo9 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo9 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo9 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE 
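/* GetStringLayout (three offsets) is the legacy shape; GetStringLayout2,
   declared further down, reflects the current System.String layout, which has
   only a length field and the character buffer. A hedged sketch of reading a
   string from an ObjectID using those offsets (assumes the object cannot move
   while it is inspected; no error handling):

       ULONG lenOff = 0, bufOff = 0;
       info->lpVtbl->GetStringLayout2(info, &lenOff, &bufOff);
       DWORD length = *(DWORD *)((BYTE *)objectId + lenOff);
       WCHAR *chars = (WCHAR *)((BYTE *)objectId + bufOff);
 */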
*GetStringLayout )( ICorProfilerInfo9 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo9 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo9 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ 
COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo9 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo9 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo9 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE 
*InitializeCurrentThread )( ICorProfilerInfo9 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo9 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo9 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo9 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo9 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo9 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo9 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo9 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( 
STDMETHODCALLTYPE *GetILToNativeMapping3 )(
            ICorProfilerInfo9 * This,
            UINT_PTR pNativeCodeStartAddress,
            ULONG32 cMap,
            ULONG32 *pcMap,
            COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);

        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )(
            ICorProfilerInfo9 * This,
            UINT_PTR pNativeCodeStartAddress,
            ULONG32 cCodeInfos,
            ULONG32 *pcCodeInfos,
            COR_PRF_CODE_INFO codeInfos[ ]);

        END_INTERFACE
    } ICorProfilerInfo9Vtbl;

    interface ICorProfilerInfo9
    {
        CONST_VTBL struct ICorProfilerInfo9Vtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerInfo9_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerInfo9_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerInfo9_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerInfo9_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
#define ICorProfilerInfo9_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
#define ICorProfilerInfo9_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
#define ICorProfilerInfo9_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
#define ICorProfilerInfo9_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
#define ICorProfilerInfo9_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
#define ICorProfilerInfo9_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
#define ICorProfilerInfo9_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
#define ICorProfilerInfo9_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
#define ICorProfilerInfo9_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
#define ICorProfilerInfo9_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
#define ICorProfilerInfo9_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
#define ICorProfilerInfo9_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
#define ICorProfilerInfo9_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
#define ICorProfilerInfo9_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo9_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
#define ICorProfilerInfo9_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
#define ICorProfilerInfo9_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
    ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )
#define ICorProfilerInfo9_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
    ( (This)->lpVtbl ->
GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo9_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo9_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo9_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo9_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo9_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo9_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo9_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo9_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo9_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo9_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo9_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo9_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo9_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo9_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo9_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo9_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo9_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo9_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo9_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo9_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> 
GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo9_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo9_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo9_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo9_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo9_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo9_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo9_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo9_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo9_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo9_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo9_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo9_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo9_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo9_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo9_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo9_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo9_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo9_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo9_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo9_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define 
ICorProfilerInfo9_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo9_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo9_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo9_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo9_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo9_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo9_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo9_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo9_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo9_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo9_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo9_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo9_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo9_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo9_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo9_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo9_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo9_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo9_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo9_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> 
GetEventMask2(This,pdwEventsLow,pdwEventsHigh) )

#define ICorProfilerInfo9_SetEventMask2(This,dwEventsLow,dwEventsHigh) \
    ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) )
#define ICorProfilerInfo9_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \
    ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) )
#define ICorProfilerInfo9_ApplyMetaData(This,moduleId) \
    ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) )
#define ICorProfilerInfo9_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \
    ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) )
#define ICorProfilerInfo9_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \
    ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) )
#define ICorProfilerInfo9_IsFunctionDynamic(This,functionId,isDynamic) \
    ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) )
#define ICorProfilerInfo9_GetFunctionFromIP3(This,ip,functionId,pReJitId) \
    ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) )
#define ICorProfilerInfo9_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \
    ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) )
#define ICorProfilerInfo9_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \
    ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) )
#define ICorProfilerInfo9_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) )
#define ICorProfilerInfo9_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerInfo9_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo10_INTERFACE_DEFINED__
#define __ICorProfilerInfo10_INTERFACE_DEFINED__

/* interface ICorProfilerInfo10 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo10;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("2F1B5152-C869-40C9-AA5F-3ABE026BD720")
    ICorProfilerInfo10 : public ICorProfilerInfo9
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EnumerateObjectReferences(
            ObjectID objectId,
            ObjectReferenceCallback callback,
            void *clientData) = 0;

        virtual HRESULT STDMETHODCALLTYPE IsFrozenObject(
            ObjectID objectId,
            BOOL *pbFrozen) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetLOHObjectSizeThreshold(
            DWORD *pThreshold) = 0;

        virtual HRESULT STDMETHODCALLTYPE RequestReJITWithInliners(
            /* [in] */ DWORD dwRejitFlags,
            /* [in] */ ULONG cFunctions,
            /* [size_is][in] */ ModuleID moduleIds[ ],
            /* [size_is][in] */ mdMethodDef methodIds[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE SuspendRuntime( void) = 0;

        virtual HRESULT STDMETHODCALLTYPE ResumeRuntime( void) = 0;
    };

#else /* C style interface */

    typedef struct ICorProfilerInfo10Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerInfo10 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_ void
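        // -------------------------------------------------------------------
        // Illustrative aside (not part of the MIDL-generated output): a
        // minimal sketch of driving the methods ICorProfilerInfo10 adds over
        // ICorProfilerInfo9. The names pInfo10, moduleId, and methodToken are
        // assumptions for the example, not values defined by this header.
        //
        //   // Pause managed execution, read a runtime setting, then resume.
        //   if (SUCCEEDED(pInfo10->SuspendRuntime()))
        //   {
        //       DWORD lohThreshold = 0;
        //       pInfo10->GetLOHObjectSizeThreshold(&lohThreshold);
        //       pInfo10->ResumeRuntime();
        //   }
        //
        //   // Re-JIT one method while blocking inlining of it elsewhere.
        //   ModuleID    moduleIds[] = { moduleId };
        //   mdMethodDef methodIds[] = { methodToken };
        //   pInfo10->RequestReJITWithInliners(
        //       COR_PRF_REJIT_BLOCK_INLINING, 1, moduleIds, methodIds);
        // -------------------------------------------------------------------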
**ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo10 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo10 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo10 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo10 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo10 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo10 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( 
ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo10 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo10 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo10 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo10 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo10 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo10 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo10 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo10 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo10 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID 
*pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo10 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo10 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionIDMapper2 *pFunc, 
/* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo10 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo10 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo10 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo10 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo10 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo10 
* This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo10 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo10 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo10 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo10 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo10 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping3 )( ICorProfilerInfo10 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )( ICorProfilerInfo10 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *EnumerateObjectReferences )( ICorProfilerInfo10 * This, ObjectID objectId, ObjectReferenceCallback callback, void *clientData); HRESULT ( 
STDMETHODCALLTYPE *IsFrozenObject )(
            ICorProfilerInfo10 * This,
            ObjectID objectId,
            BOOL *pbFrozen);

        HRESULT ( STDMETHODCALLTYPE *GetLOHObjectSizeThreshold )(
            ICorProfilerInfo10 * This,
            DWORD *pThreshold);

        HRESULT ( STDMETHODCALLTYPE *RequestReJITWithInliners )(
            ICorProfilerInfo10 * This,
            /* [in] */ DWORD dwRejitFlags,
            /* [in] */ ULONG cFunctions,
            /* [size_is][in] */ ModuleID moduleIds[ ],
            /* [size_is][in] */ mdMethodDef methodIds[ ]);

        HRESULT ( STDMETHODCALLTYPE *SuspendRuntime )(
            ICorProfilerInfo10 * This);

        HRESULT ( STDMETHODCALLTYPE *ResumeRuntime )(
            ICorProfilerInfo10 * This);

        END_INTERFACE
    } ICorProfilerInfo10Vtbl;

    interface ICorProfilerInfo10
    {
        CONST_VTBL struct ICorProfilerInfo10Vtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerInfo10_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerInfo10_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerInfo10_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerInfo10_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
#define ICorProfilerInfo10_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
#define ICorProfilerInfo10_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
#define ICorProfilerInfo10_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
#define ICorProfilerInfo10_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
#define ICorProfilerInfo10_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
#define ICorProfilerInfo10_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
#define ICorProfilerInfo10_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
#define ICorProfilerInfo10_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
#define ICorProfilerInfo10_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
#define ICorProfilerInfo10_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
#define ICorProfilerInfo10_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
#define ICorProfilerInfo10_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
#define ICorProfilerInfo10_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
#define ICorProfilerInfo10_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo10_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
#define ICorProfilerInfo10_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
#define
ICorProfilerInfo10_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo10_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo10_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo10_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo10_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo10_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo10_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo10_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo10_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo10_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo10_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo10_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo10_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo10_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo10_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo10_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo10_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo10_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo10_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo10_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo10_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( 
(This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo10_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo10_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo10_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo10_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo10_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo10_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo10_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo10_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo10_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo10_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo10_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo10_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo10_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo10_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo10_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo10_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo10_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo10_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo10_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo10_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> 
GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo10_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo10_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo10_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo10_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo10_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo10_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo10_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo10_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo10_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo10_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo10_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo10_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo10_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo10_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo10_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo10_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo10_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo10_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo10_EnumJITedFunctions2(This,ppEnum) \ ( 
(This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) )

#define ICorProfilerInfo10_GetObjectSize2(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) )
#define ICorProfilerInfo10_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \
    ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) )
#define ICorProfilerInfo10_SetEventMask2(This,dwEventsLow,dwEventsHigh) \
    ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) )
#define ICorProfilerInfo10_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \
    ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) )
#define ICorProfilerInfo10_ApplyMetaData(This,moduleId) \
    ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) )
#define ICorProfilerInfo10_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \
    ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) )
#define ICorProfilerInfo10_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \
    ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) )
#define ICorProfilerInfo10_IsFunctionDynamic(This,functionId,isDynamic) \
    ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) )
#define ICorProfilerInfo10_GetFunctionFromIP3(This,ip,functionId,pReJitId) \
    ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) )
#define ICorProfilerInfo10_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \
    ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) )
#define ICorProfilerInfo10_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \
    ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) )
#define ICorProfilerInfo10_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) )
#define ICorProfilerInfo10_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) )
#define ICorProfilerInfo10_EnumerateObjectReferences(This,objectId,callback,clientData) \
    ( (This)->lpVtbl -> EnumerateObjectReferences(This,objectId,callback,clientData) )
#define ICorProfilerInfo10_IsFrozenObject(This,objectId,pbFrozen) \
    ( (This)->lpVtbl -> IsFrozenObject(This,objectId,pbFrozen) )
#define ICorProfilerInfo10_GetLOHObjectSizeThreshold(This,pThreshold) \
    ( (This)->lpVtbl -> GetLOHObjectSizeThreshold(This,pThreshold) )
#define ICorProfilerInfo10_RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) \
    ( (This)->lpVtbl -> RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) )
#define ICorProfilerInfo10_SuspendRuntime(This) \
    ( (This)->lpVtbl -> SuspendRuntime(This) )
#define ICorProfilerInfo10_ResumeRuntime(This) \
    ( (This)->lpVtbl -> ResumeRuntime(This) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerInfo10_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo11_INTERFACE_DEFINED__
#define __ICorProfilerInfo11_INTERFACE_DEFINED__

/* interface ICorProfilerInfo11 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo11;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("06398876-8987-4154-B621-40A00D6E4D04")
    ICorProfilerInfo11 : public ICorProfilerInfo10
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE GetEnvironmentVariable(
            /* [string][in] */ const WCHAR *szName,
            /* [in] */ ULONG cchValue,
            /* [out] */ ULONG *pcchValue,
            /* [annotation][out] */
            _Out_writes_to_(cchValue, *pcchValue) WCHAR szValue[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE SetEnvironmentVariable(
            /* [string][in] */ const WCHAR *szName,
            /* [string][in] */ const WCHAR *szValue) = 0;
    };

#else /* C style interface */

    typedef struct ICorProfilerInfo11Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo11 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo11 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo11 * This);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo11 * This, /* [out] */ DWORD *pdwEvents);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo11 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread);
        HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank);
        HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo11 * This, /* [out] */ ThreadID *pThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken);
        HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwEvents);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo11 * This, /* [in] */ FunctionIDMapper *pFunc);
        HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */
REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo11 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo11 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo11 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo11 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo11 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo11 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( 
ICorProfilerInfo11 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo11 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo11 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE 
*GetGenerationBounds )( ICorProfilerInfo11 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo11 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo11 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo11 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] 
*/ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo11 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo11 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo11 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo11 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo11 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo11 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* 
[out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo11 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping3 )( ICorProfilerInfo11 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )( ICorProfilerInfo11 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *EnumerateObjectReferences )( ICorProfilerInfo11 * This, ObjectID objectId, ObjectReferenceCallback callback, void *clientData); HRESULT ( STDMETHODCALLTYPE *IsFrozenObject )( ICorProfilerInfo11 * This, ObjectID objectId, BOOL *pbFrozen); HRESULT ( STDMETHODCALLTYPE *GetLOHObjectSizeThreshold )( ICorProfilerInfo11 * This, DWORD *pThreshold); HRESULT ( STDMETHODCALLTYPE *RequestReJITWithInliners )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwRejitFlags, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *SuspendRuntime )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *ResumeRuntime )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *GetEnvironmentVariable )( ICorProfilerInfo11 * This, /* [string][in] */ const WCHAR *szName, /* [in] */ ULONG cchValue, /* [out] */ ULONG *pcchValue, /* [annotation][out] */ _Out_writes_to_(cchValue, *pcchValue) WCHAR szValue[ ]); HRESULT ( STDMETHODCALLTYPE *SetEnvironmentVariable )( ICorProfilerInfo11 * This, /* [string][in] */ const WCHAR *szName, /* [string][in] */ const WCHAR *szValue); END_INTERFACE } ICorProfilerInfo11Vtbl; interface ICorProfilerInfo11 { CONST_VTBL struct ICorProfilerInfo11Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo11_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo11_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo11_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo11_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo11_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo11_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo11_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo11_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo11_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo11_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo11_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo11_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) 
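/* Editorial note (not part of the MIDL-generated output): with COBJMACROS
   defined, C clients invoke interface methods through wrapper macros like the
   ones below. For example, given a hypothetical pInfo obtained via
   QueryInterface:

       ICorProfilerInfo11 *pInfo = ...;   // hypothetical interface pointer
       DWORD win32Tid;
       HRESULT hr = ICorProfilerInfo11_GetThreadInfo(pInfo, threadId, &win32Tid);

   the call expands to ( (pInfo)->lpVtbl -> GetThreadInfo(pInfo, threadId, &win32Tid) ),
   i.e. a direct dispatch through the C-style vtable. */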
#define ICorProfilerInfo11_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo11_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo11_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo11_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo11_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo11_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo11_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo11_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo11_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo11_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo11_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo11_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo11_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo11_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo11_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo11_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo11_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo11_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo11_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo11_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo11_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define 
ICorProfilerInfo11_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo11_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo11_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo11_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo11_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo11_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo11_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo11_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo11_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo11_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo11_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo11_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo11_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo11_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo11_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo11_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo11_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo11_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( 
(This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo11_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo11_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo11_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo11_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo11_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo11_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo11_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo11_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo11_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo11_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo11_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo11_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo11_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo11_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo11_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo11_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo11_EnumThreads(This,ppEnum) \ ( 
(This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo11_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo11_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo11_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo11_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo11_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo11_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo11_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo11_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo11_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo11_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo11_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo11_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo11_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo11_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo11_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #define ICorProfilerInfo11_IsFunctionDynamic(This,functionId,isDynamic) \ ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) ) #define ICorProfilerInfo11_GetFunctionFromIP3(This,ip,functionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) ) #define ICorProfilerInfo11_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \ ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) ) #define ICorProfilerInfo11_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \ ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) ) #define ICorProfilerInfo11_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) ) #define 
ICorProfilerInfo11_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo11_EnumerateObjectReferences(This,objectId,callback,clientData) \ ( (This)->lpVtbl -> EnumerateObjectReferences(This,objectId,callback,clientData) ) #define ICorProfilerInfo11_IsFrozenObject(This,objectId,pbFrozen) \ ( (This)->lpVtbl -> IsFrozenObject(This,objectId,pbFrozen) ) #define ICorProfilerInfo11_GetLOHObjectSizeThreshold(This,pThreshold) \ ( (This)->lpVtbl -> GetLOHObjectSizeThreshold(This,pThreshold) ) #define ICorProfilerInfo11_RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo11_SuspendRuntime(This) \ ( (This)->lpVtbl -> SuspendRuntime(This) ) #define ICorProfilerInfo11_ResumeRuntime(This) \ ( (This)->lpVtbl -> ResumeRuntime(This) ) #define ICorProfilerInfo11_GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue) \ ( (This)->lpVtbl -> GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue) ) #define ICorProfilerInfo11_SetEnvironmentVariable(This,szName,szValue) \ ( (This)->lpVtbl -> SetEnvironmentVariable(This,szName,szValue) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo11_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo12_INTERFACE_DEFINED__ #define __ICorProfilerInfo12_INTERFACE_DEFINED__ /* interface ICorProfilerInfo12 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo12; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("27b24ccd-1cb1-47c5-96ee-98190dc30959") ICorProfilerInfo12 : public ICorProfilerInfo11 { public: virtual HRESULT STDMETHODCALLTYPE EventPipeStartSession( /* [in] */ UINT32 cProviderConfigs, /* [size_is][in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG pProviderConfigs[ ], /* [in] */ BOOL requestRundown, /* [out] */ EVENTPIPE_SESSION *pSession) = 0; virtual HRESULT STDMETHODCALLTYPE EventPipeAddProviderToSession( /* [in] */ EVENTPIPE_SESSION session, /* [in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG providerConfig) = 0; virtual HRESULT STDMETHODCALLTYPE EventPipeStopSession( /* [in] */ EVENTPIPE_SESSION session) = 0; virtual HRESULT STDMETHODCALLTYPE EventPipeCreateProvider( /* [string][in] */ const WCHAR *providerName, /* [out] */ EVENTPIPE_PROVIDER *pProvider) = 0; virtual HRESULT STDMETHODCALLTYPE EventPipeGetProviderInfo( /* [in] */ EVENTPIPE_PROVIDER provider, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR providerName[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE EventPipeDefineEvent( /* [in] */ EVENTPIPE_PROVIDER provider, /* [string][in] */ const WCHAR *eventName, /* [in] */ UINT32 eventID, /* [in] */ UINT64 keywords, /* [in] */ UINT32 eventVersion, /* [in] */ UINT32 level, /* [in] */ UINT8 opcode, /* [in] */ BOOL needStack, /* [in] */ UINT32 cParamDescs, /* [size_is][in] */ COR_PRF_EVENTPIPE_PARAM_DESC pParamDescs[ ], /* [out] */ EVENTPIPE_EVENT *pEvent) = 0; virtual HRESULT STDMETHODCALLTYPE EventPipeWriteEvent( /* [in] */ EVENTPIPE_EVENT event, /* [in] */ UINT32 cData, /* [size_is][in] */ COR_PRF_EVENT_DATA data[ ], /* [in] */ LPCGUID pActivityId, /* [in] */ LPCGUID pRelatedActivityId) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo12Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( 
ICorProfilerInfo12 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo12 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo12 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo12 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo12 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo12 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE 
*ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo12 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo12 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo12 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo12 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo12 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo12 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo12 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo12 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( 
STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo12 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo12 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); 
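/* Editorial note (not part of the MIDL-generated output): this C-style vtbl
   flattens the entire inheritance chain, so the entries above and below mirror
   ICorProfilerInfo through ICorProfilerInfo11 in declaration order; the
   EventPipe* methods introduced by ICorProfilerInfo12 appear at the end of the
   table, immediately before END_INTERFACE. */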
HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo12 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo12 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo12 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo12 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef 
methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo12 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo12 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo12 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo12 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo12 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping3 )( ICorProfilerInfo12 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )( ICorProfilerInfo12 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE 
*EnumerateObjectReferences )( ICorProfilerInfo12 * This, ObjectID objectId, ObjectReferenceCallback callback, void *clientData); HRESULT ( STDMETHODCALLTYPE *IsFrozenObject )( ICorProfilerInfo12 * This, ObjectID objectId, BOOL *pbFrozen); HRESULT ( STDMETHODCALLTYPE *GetLOHObjectSizeThreshold )( ICorProfilerInfo12 * This, DWORD *pThreshold); HRESULT ( STDMETHODCALLTYPE *RequestReJITWithInliners )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwRejitFlags, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *SuspendRuntime )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *ResumeRuntime )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *GetEnvironmentVariable )( ICorProfilerInfo12 * This, /* [string][in] */ const WCHAR *szName, /* [in] */ ULONG cchValue, /* [out] */ ULONG *pcchValue, /* [annotation][out] */ _Out_writes_to_(cchValue, *pcchValue) WCHAR szValue[ ]); HRESULT ( STDMETHODCALLTYPE *SetEnvironmentVariable )( ICorProfilerInfo12 * This, /* [string][in] */ const WCHAR *szName, /* [string][in] */ const WCHAR *szValue); HRESULT ( STDMETHODCALLTYPE *EventPipeStartSession )( ICorProfilerInfo12 * This, /* [in] */ UINT32 cProviderConfigs, /* [size_is][in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG pProviderConfigs[ ], /* [in] */ BOOL requestRundown, /* [out] */ EVENTPIPE_SESSION *pSession); HRESULT ( STDMETHODCALLTYPE *EventPipeAddProviderToSession )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_SESSION session, /* [in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG providerConfig); HRESULT ( STDMETHODCALLTYPE *EventPipeStopSession )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_SESSION session); HRESULT ( STDMETHODCALLTYPE *EventPipeCreateProvider )( ICorProfilerInfo12 * This, /* [string][in] */ const WCHAR *providerName, /* [out] */ EVENTPIPE_PROVIDER *pProvider); HRESULT ( STDMETHODCALLTYPE *EventPipeGetProviderInfo )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_PROVIDER provider, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR providerName[ ]); HRESULT ( STDMETHODCALLTYPE *EventPipeDefineEvent )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_PROVIDER provider, /* [string][in] */ const WCHAR *eventName, /* [in] */ UINT32 eventID, /* [in] */ UINT64 keywords, /* [in] */ UINT32 eventVersion, /* [in] */ UINT32 level, /* [in] */ UINT8 opcode, /* [in] */ BOOL needStack, /* [in] */ UINT32 cParamDescs, /* [size_is][in] */ COR_PRF_EVENTPIPE_PARAM_DESC pParamDescs[ ], /* [out] */ EVENTPIPE_EVENT *pEvent); HRESULT ( STDMETHODCALLTYPE *EventPipeWriteEvent )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_EVENT event, /* [in] */ UINT32 cData, /* [size_is][in] */ COR_PRF_EVENT_DATA data[ ], /* [in] */ LPCGUID pActivityId, /* [in] */ LPCGUID pRelatedActivityId); END_INTERFACE } ICorProfilerInfo12Vtbl; interface ICorProfilerInfo12 { CONST_VTBL struct ICorProfilerInfo12Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo12_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo12_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo12_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo12_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo12_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> 
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )

#define ICorProfilerInfo12_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )

#define ICorProfilerInfo12_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )

#define ICorProfilerInfo12_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )

#define ICorProfilerInfo12_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )

#define ICorProfilerInfo12_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )

#define ICorProfilerInfo12_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )

#define ICorProfilerInfo12_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )

#define ICorProfilerInfo12_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )

#define ICorProfilerInfo12_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )

#define ICorProfilerInfo12_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )

#define ICorProfilerInfo12_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )

#define ICorProfilerInfo12_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )

#define ICorProfilerInfo12_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )

#define ICorProfilerInfo12_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )

#define ICorProfilerInfo12_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )

#define ICorProfilerInfo12_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
    ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )

#define ICorProfilerInfo12_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
    ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) )

#define ICorProfilerInfo12_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \
    ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) )

#define ICorProfilerInfo12_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \
    ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) )

#define ICorProfilerInfo12_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) )

#define ICorProfilerInfo12_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \
    ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) )

#define ICorProfilerInfo12_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \
    ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) )

#define ICorProfilerInfo12_SetFunctionReJIT(This,functionId) \
    ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) )

#define ICorProfilerInfo12_ForceGC(This) \
    ( (This)->lpVtbl -> ForceGC(This) )

#define ICorProfilerInfo12_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) )

#define ICorProfilerInfo12_GetInprocInspectionInterface(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) )

#define ICorProfilerInfo12_GetInprocInspectionIThisThread(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) )

#define ICorProfilerInfo12_GetThreadContext(This,threadId,pContextId) \
    ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) )

#define ICorProfilerInfo12_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \
    ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) )

#define ICorProfilerInfo12_EndInprocDebugging(This,dwProfilerContext) \
    ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) )

#define ICorProfilerInfo12_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) )

#define ICorProfilerInfo12_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \
    ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) )

#define ICorProfilerInfo12_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) )

#define ICorProfilerInfo12_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) )

#define ICorProfilerInfo12_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) )

#define ICorProfilerInfo12_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \
    ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) )

#define ICorProfilerInfo12_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) )

#define ICorProfilerInfo12_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) )

#define ICorProfilerInfo12_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \
    ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) )

#define ICorProfilerInfo12_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \
    ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) )

#define ICorProfilerInfo12_EnumModuleFrozenObjects(This,moduleID,ppEnum) \
    ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) )

#define ICorProfilerInfo12_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \
    ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) )

#define ICorProfilerInfo12_GetBoxClassLayout(This,classId,pBufferOffset) \
    ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) )

#define ICorProfilerInfo12_GetThreadAppDomain(This,threadId,pAppDomainId) \
    ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) )

#define ICorProfilerInfo12_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \
    ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) )

#define ICorProfilerInfo12_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \
    ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) )

#define ICorProfilerInfo12_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \
    ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) )

#define ICorProfilerInfo12_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \
    ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) )

#define ICorProfilerInfo12_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \
    ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) )

#define ICorProfilerInfo12_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \
    ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) )

#define ICorProfilerInfo12_GetObjectGeneration(This,objectId,range) \
    ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) )

#define ICorProfilerInfo12_GetNotifiedExceptionClauseInfo(This,pinfo) \
    ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) )

#define ICorProfilerInfo12_EnumJITedFunctions(This,ppEnum) \
    ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) )

#define ICorProfilerInfo12_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \
    ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) )

#define ICorProfilerInfo12_SetFunctionIDMapper2(This,pFunc,clientData) \
    ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) )

#define ICorProfilerInfo12_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) )

#define ICorProfilerInfo12_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) )

#define ICorProfilerInfo12_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) )

#define ICorProfilerInfo12_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \
    ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) )

#define ICorProfilerInfo12_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \
    ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) )

#define ICorProfilerInfo12_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \
    ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) )

#define ICorProfilerInfo12_EnumModules(This,ppEnum) \
    ( (This)->lpVtbl -> EnumModules(This,ppEnum) )

#define ICorProfilerInfo12_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \
    ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) )

#define ICorProfilerInfo12_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \
    ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) )

#define ICorProfilerInfo12_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \
    ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) )

#define ICorProfilerInfo12_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \
    ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) )

#define ICorProfilerInfo12_EnumThreads(This,ppEnum) \
    ( (This)->lpVtbl -> EnumThreads(This,ppEnum) )

#define ICorProfilerInfo12_InitializeCurrentThread(This) \
    ( (This)->lpVtbl -> InitializeCurrentThread(This) )

#define ICorProfilerInfo12_RequestReJIT(This,cFunctions,moduleIds,methodIds) \
    ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) )

#define ICorProfilerInfo12_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \
    ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) )

#define ICorProfilerInfo12_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) )

#define ICorProfilerInfo12_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \
    ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) )

#define ICorProfilerInfo12_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \
    ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) )

#define ICorProfilerInfo12_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) )

#define ICorProfilerInfo12_EnumJITedFunctions2(This,ppEnum) \
    ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) )

#define ICorProfilerInfo12_GetObjectSize2(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) )

#define ICorProfilerInfo12_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \
    ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) )

#define ICorProfilerInfo12_SetEventMask2(This,dwEventsLow,dwEventsHigh) \
    ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) )

#define ICorProfilerInfo12_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \
    ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) )

#define ICorProfilerInfo12_ApplyMetaData(This,moduleId) \
    ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) )

#define ICorProfilerInfo12_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \
    ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) )

#define ICorProfilerInfo12_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \
    ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) )

#define ICorProfilerInfo12_IsFunctionDynamic(This,functionId,isDynamic) \
    ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) )

#define ICorProfilerInfo12_GetFunctionFromIP3(This,ip,functionId,pReJitId) \
    ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) )

#define ICorProfilerInfo12_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \
    ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) )

#define ICorProfilerInfo12_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \
    ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) )

#define ICorProfilerInfo12_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) )

#define ICorProfilerInfo12_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) )

#define ICorProfilerInfo12_EnumerateObjectReferences(This,objectId,callback,clientData) \
    ( (This)->lpVtbl -> EnumerateObjectReferences(This,objectId,callback,clientData) )

#define ICorProfilerInfo12_IsFrozenObject(This,objectId,pbFrozen) \
    ( (This)->lpVtbl -> IsFrozenObject(This,objectId,pbFrozen) )

#define ICorProfilerInfo12_GetLOHObjectSizeThreshold(This,pThreshold) \
    ( (This)->lpVtbl -> GetLOHObjectSizeThreshold(This,pThreshold) )

#define ICorProfilerInfo12_RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) \
    ( (This)->lpVtbl -> RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) )

#define ICorProfilerInfo12_SuspendRuntime(This) \
    ( (This)->lpVtbl -> SuspendRuntime(This) )

#define ICorProfilerInfo12_ResumeRuntime(This) \
    ( (This)->lpVtbl -> ResumeRuntime(This) )

#define ICorProfilerInfo12_GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue) \
    ( (This)->lpVtbl -> GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue) )

#define ICorProfilerInfo12_SetEnvironmentVariable(This,szName,szValue) \
    ( (This)->lpVtbl -> SetEnvironmentVariable(This,szName,szValue) )

#define ICorProfilerInfo12_EventPipeStartSession(This,cProviderConfigs,pProviderConfigs,requestRundown,pSession) \
    ( (This)->lpVtbl -> EventPipeStartSession(This,cProviderConfigs,pProviderConfigs,requestRundown,pSession) )

#define ICorProfilerInfo12_EventPipeAddProviderToSession(This,session,providerConfig) \
    ( (This)->lpVtbl -> EventPipeAddProviderToSession(This,session,providerConfig) )

#define ICorProfilerInfo12_EventPipeStopSession(This,session) \
    ( (This)->lpVtbl -> EventPipeStopSession(This,session) )

#define ICorProfilerInfo12_EventPipeCreateProvider(This,providerName,pProvider) \
    ( (This)->lpVtbl -> EventPipeCreateProvider(This,providerName,pProvider) )

#define ICorProfilerInfo12_EventPipeGetProviderInfo(This,provider,cchName,pcchName,providerName) \
    ( (This)->lpVtbl -> EventPipeGetProviderInfo(This,provider,cchName,pcchName,providerName) )

#define ICorProfilerInfo12_EventPipeDefineEvent(This,provider,eventName,eventID,keywords,eventVersion,level,opcode,needStack,cParamDescs,pParamDescs,pEvent) \
    ( (This)->lpVtbl -> EventPipeDefineEvent(This,provider,eventName,eventID,keywords,eventVersion,level,opcode,needStack,cParamDescs,pParamDescs,pEvent) )

#define ICorProfilerInfo12_EventPipeWriteEvent(This,event,cData,data,pActivityId,pRelatedActivityId) \
    ( (This)->lpVtbl -> EventPipeWriteEvent(This,event,cData,data,pActivityId,pRelatedActivityId) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerInfo12_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerMethodEnum_INTERFACE_DEFINED__
#define __ICorProfilerMethodEnum_INTERFACE_DEFINED__

/* interface ICorProfilerMethodEnum */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerMethodEnum;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("FCCEE788-0088-454B-A811-C99F298D1942")
    ICorProfilerMethodEnum : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0;
        virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerMethodEnum **ppEnum) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ COR_PRF_METHOD elements[ ], /* [out] */ ULONG *pceltFetched) = 0;
    };

#else /* C style interface */

    typedef struct ICorProfilerMethodEnumVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerMethodEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerMethodEnum * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerMethodEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Skip )( ICorProfilerMethodEnum * This, /* [in] */ ULONG celt);
        HRESULT ( STDMETHODCALLTYPE *Reset )( ICorProfilerMethodEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Clone )( ICorProfilerMethodEnum * This, /* [out] */ ICorProfilerMethodEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetCount )( ICorProfilerMethodEnum * This, /* [out] */ ULONG *pcelt);
        HRESULT ( STDMETHODCALLTYPE *Next )( ICorProfilerMethodEnum * This, /* [in] */ ULONG celt, /* [length_is][size_is][out] */ COR_PRF_METHOD elements[ ], /* [out] */ ULONG *pceltFetched);
        END_INTERFACE
    } ICorProfilerMethodEnumVtbl;

    interface ICorProfilerMethodEnum
    {
        CONST_VTBL struct ICorProfilerMethodEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerMethodEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerMethodEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerMethodEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerMethodEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )

#define ICorProfilerMethodEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )

#define ICorProfilerMethodEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )

#define ICorProfilerMethodEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )

#define ICorProfilerMethodEnum_Next(This,celt,elements,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,elements,pceltFetched) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerMethodEnum_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerThreadEnum_INTERFACE_DEFINED__
#define __ICorProfilerThreadEnum_INTERFACE_DEFINED__

/* interface ICorProfilerThreadEnum */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerThreadEnum;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("571194f7-25ed-419f-aa8b-7016b3159701")
    ICorProfilerThreadEnum : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0;
        virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerThreadEnum **ppEnum) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0;
        virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ThreadID ids[ ], /* [out] */ ULONG *pceltFetched) = 0;
    };

#else /* C style interface */

    typedef struct ICorProfilerThreadEnumVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerThreadEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerThreadEnum * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerThreadEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Skip )( ICorProfilerThreadEnum * This, /* [in] */ ULONG celt);
        HRESULT ( STDMETHODCALLTYPE *Reset )( ICorProfilerThreadEnum * This);
        HRESULT ( STDMETHODCALLTYPE *Clone )( ICorProfilerThreadEnum * This, /* [out] */ ICorProfilerThreadEnum **ppEnum);
        HRESULT ( STDMETHODCALLTYPE *GetCount )( ICorProfilerThreadEnum * This, /* [out] */ ULONG *pcelt);
        HRESULT ( STDMETHODCALLTYPE *Next )( ICorProfilerThreadEnum * This, /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ThreadID ids[ ], /* [out] */ ULONG *pceltFetched);
        END_INTERFACE
    } ICorProfilerThreadEnumVtbl;

    interface ICorProfilerThreadEnum
    {
        CONST_VTBL struct ICorProfilerThreadEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerThreadEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerThreadEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerThreadEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerThreadEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )

#define ICorProfilerThreadEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )

#define ICorProfilerThreadEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )

#define ICorProfilerThreadEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )

#define ICorProfilerThreadEnum_Next(This,celt,ids,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,ids,pceltFetched) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerThreadEnum_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerAssemblyReferenceProvider_INTERFACE_DEFINED__
#define __ICorProfilerAssemblyReferenceProvider_INTERFACE_DEFINED__

/* interface ICorProfilerAssemblyReferenceProvider */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerAssemblyReferenceProvider;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("66A78C24-2EEF-4F65-B45F-DD1D8038BF3C")
    ICorProfilerAssemblyReferenceProvider : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE AddAssemblyReference( const COR_PRF_ASSEMBLY_REFERENCE_INFO *pAssemblyRefInfo) = 0;
    };

#else /* C style interface */

    typedef struct ICorProfilerAssemblyReferenceProviderVtbl
    {
        BEGIN_INTERFACE
        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerAssemblyReferenceProvider * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerAssemblyReferenceProvider * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerAssemblyReferenceProvider * This);
        HRESULT ( STDMETHODCALLTYPE *AddAssemblyReference )( ICorProfilerAssemblyReferenceProvider * This, const COR_PRF_ASSEMBLY_REFERENCE_INFO *pAssemblyRefInfo);
        END_INTERFACE
    } ICorProfilerAssemblyReferenceProviderVtbl;

    interface ICorProfilerAssemblyReferenceProvider
    {
        CONST_VTBL struct ICorProfilerAssemblyReferenceProviderVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerAssemblyReferenceProvider_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerAssemblyReferenceProvider_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerAssemblyReferenceProvider_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerAssemblyReferenceProvider_AddAssemblyReference(This,pAssemblyRefInfo) \
    ( (This)->lpVtbl -> AddAssemblyReference(This,pAssemblyRefInfo) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerAssemblyReferenceProvider_INTERFACE_DEFINED__ */

/* Additional Prototypes for ALL interfaces */

/* end of Additional Prototypes */

#ifdef __cplusplus
}
#endif

#endif
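/*
 * Illustrative sketch (not part of the MIDL-generated header above): one way a
 * native profiler built against the C-style interface, with COBJMACROS defined,
 * might emit a custom EventPipe event through ICorProfilerInfo12 using the
 * EventPipeCreateProvider / EventPipeDefineEvent / EventPipeWriteEvent entries
 * declared earlier. The provider name, event name, and the EmitProfilerMarker
 * helper are hypothetical; a real profiler would obtain its ICorProfilerInfo12
 * pointer by calling QueryInterface on the IUnknown handed to
 * ICorProfilerCallback::Initialize, and would add real error handling.
 */
#if 0 /* example only -- excluded from compilation */
static HRESULT EmitProfilerMarker(ICorProfilerInfo12 *pInfo)
{
    EVENTPIPE_PROVIDER provider = 0;
    EVENTPIPE_EVENT marker = 0;

    /* Create a provider to publish events under (name is an example). */
    HRESULT hr = ICorProfilerInfo12_EventPipeCreateProvider(pInfo, L"MyProfilerProvider", &provider);
    if (FAILED(hr))
        return hr;

    /* Define a payload-free event: id 1, no keywords, version 1,
       informational level, no opcode, stacks requested, zero param descriptors. */
    hr = ICorProfilerInfo12_EventPipeDefineEvent(pInfo, provider, L"ProfilerMarker",
            1, 0, 1, COR_PRF_EVENTPIPE_INFORMATIONAL, 0, TRUE, 0, NULL, &marker);
    if (FAILED(hr))
        return hr;

    /* Write the event with no data payload and no activity IDs attached. */
    return ICorProfilerInfo12_EventPipeWriteEvent(pInfo, marker, 0, NULL, NULL, NULL);
}
#endif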
AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> 
RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> 
ExceptionOSHandlerEnter(This,__unused) )

#define ICorProfilerCallback_ExceptionOSHandlerLeave(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )

#define ICorProfilerCallback_ExceptionUnwindFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )

#define ICorProfilerCallback_ExceptionUnwindFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )

#define ICorProfilerCallback_ExceptionUnwindFinallyEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )

#define ICorProfilerCallback_ExceptionUnwindFinallyLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )

#define ICorProfilerCallback_ExceptionCatcherEnter(This,functionId,objectId) \
    ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )

#define ICorProfilerCallback_ExceptionCatcherLeave(This) \
    ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )

#define ICorProfilerCallback_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
    ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )

#define ICorProfilerCallback_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
    ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )

#define ICorProfilerCallback_ExceptionCLRCatcherFound(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )

#define ICorProfilerCallback_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __ICorProfilerCallback_INTERFACE_DEFINED__ */

/* interface __MIDL_itf_corprof_0000_0001 */
/* [local] */

typedef /* [public][public] */
enum __MIDL___MIDL_itf_corprof_0000_0001_0001
    {
        COR_PRF_GC_ROOT_STACK = 1,
        COR_PRF_GC_ROOT_FINALIZER = 2,
        COR_PRF_GC_ROOT_HANDLE = 3,
        COR_PRF_GC_ROOT_OTHER = 0
    }   COR_PRF_GC_ROOT_KIND;

typedef /* [public][public] */
enum __MIDL___MIDL_itf_corprof_0000_0001_0002
    {
        COR_PRF_GC_ROOT_PINNING = 0x1,
        COR_PRF_GC_ROOT_WEAKREF = 0x2,
        COR_PRF_GC_ROOT_INTERIOR = 0x4,
        COR_PRF_GC_ROOT_REFCOUNTED = 0x8
    }   COR_PRF_GC_ROOT_FLAGS;

typedef /* [public] */
enum __MIDL___MIDL_itf_corprof_0000_0001_0003
    {
        COR_PRF_FINALIZER_CRITICAL = 0x1
    }   COR_PRF_FINALIZER_FLAGS;

typedef /* [public][public][public][public] */
enum __MIDL___MIDL_itf_corprof_0000_0001_0004
    {
        COR_PRF_GC_GEN_0 = 0,
        COR_PRF_GC_GEN_1 = 1,
        COR_PRF_GC_GEN_2 = 2,
        COR_PRF_GC_LARGE_OBJECT_HEAP = 3,
        COR_PRF_GC_PINNED_OBJECT_HEAP = 4
    }   COR_PRF_GC_GENERATION;

typedef struct COR_PRF_GC_GENERATION_RANGE
    {
    COR_PRF_GC_GENERATION generation;
    ObjectID rangeStart;
    UINT_PTR rangeLength;
    UINT_PTR rangeLengthReserved;
    }   COR_PRF_GC_GENERATION_RANGE;

typedef /* [public][public][public] */
enum __MIDL___MIDL_itf_corprof_0000_0001_0005
    {
        COR_PRF_CLAUSE_NONE = 0,
        COR_PRF_CLAUSE_FILTER = 1,
        COR_PRF_CLAUSE_CATCH = 2,
        COR_PRF_CLAUSE_FINALLY = 3
    }   COR_PRF_CLAUSE_TYPE;

typedef struct COR_PRF_EX_CLAUSE_INFO
    {
    COR_PRF_CLAUSE_TYPE clauseType;
    UINT_PTR programCounter;
    UINT_PTR framePointer;
    UINT_PTR shadowStackPointer;
    }   COR_PRF_EX_CLAUSE_INFO;

typedef /* [public][public] */
enum __MIDL___MIDL_itf_corprof_0000_0001_0006
    {
        COR_PRF_GC_INDUCED = 1,
        COR_PRF_GC_OTHER = 0
    }   COR_PRF_GC_REASON;

typedef /* [public] */
enum __MIDL___MIDL_itf_corprof_0000_0001_0007
    {
        COR_PRF_MODULE_DISK = 0x1,
        COR_PRF_MODULE_NGEN = 0x2,
        COR_PRF_MODULE_DYNAMIC = 0x4,
        COR_PRF_MODULE_COLLECTIBLE = 0x8,
        COR_PRF_MODULE_RESOURCE = 0x10,
COR_PRF_MODULE_FLAT_LAYOUT = 0x20, COR_PRF_MODULE_WINDOWS_RUNTIME = 0x40 } COR_PRF_MODULE_FLAGS; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0001_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0001_v0_0_s_ifspec; #ifndef __ICorProfilerCallback2_INTERFACE_DEFINED__ #define __ICorProfilerCallback2_INTERFACE_DEFINED__ /* interface ICorProfilerCallback2 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback2; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("8A8CC829-CCF2-49fe-BBAE-0F022228071A") ICorProfilerCallback2 : public ICorProfilerCallback { public: virtual HRESULT STDMETHODCALLTYPE ThreadNameChanged( /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GarbageCollectionStarted( /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason) = 0; virtual HRESULT STDMETHODCALLTYPE SurvivingReferences( /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GarbageCollectionFinished( void) = 0; virtual HRESULT STDMETHODCALLTYPE FinalizeableObjectQueued( /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID) = 0; virtual HRESULT STDMETHODCALLTYPE RootReferences2( /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE HandleCreated( /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId) = 0; virtual HRESULT STDMETHODCALLTYPE HandleDestroyed( /* [in] */ GCHandleID handleId) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback2Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback2 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback2 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback2 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback2 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback2 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( 
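/* Editorial sketch, commented out and not part of the generated header: a
   minimal RootReferences2 handler showing how the COR_PRF_GC_ROOT_KIND and
   COR_PRF_GC_ROOT_FLAGS arrays defined above pair up element-for-element
   with rootRefIds. MyProfiler and LogRoot are hypothetical names.

   HRESULT STDMETHODCALLTYPE MyProfiler::RootReferences2(
       ULONG cRootRefs, ObjectID rootRefIds[], COR_PRF_GC_ROOT_KIND rootKinds[],
       COR_PRF_GC_ROOT_FLAGS rootFlags[], UINT_PTR rootIds[])
   {
       for (ULONG i = 0; i < cRootRefs; i++)
       {
           // A root may not currently reference an object; such entries
           // report a NULL ObjectID and can be skipped.
           if (rootRefIds[i] == NULL)
               continue;
           bool pinned = (rootFlags[i] & COR_PRF_GC_ROOT_PINNING) != 0;
           LogRoot(rootRefIds[i], rootKinds[i], pinned);   // hypothetical helper
       }
       return S_OK;
   }
*/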
STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback2 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback2 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback2 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback2 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback2 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT 
( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback2 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback2 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback2 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback2 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback2 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback2 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback2 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback2 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback2 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback2 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave 
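/* Editorial note, not part of the MIDL-generated text: the Exception*Enter
   and Exception*Leave callbacks above arrive as strictly nested pairs, so a
   profiler maintaining a shadow stack can treat each Enter as a push and
   the matching Leave as a pop. */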
)( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback2 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback2 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback2 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback2 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback2 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback2 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback2 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback2 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback2 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback2 * This, /* [in] */ GCHandleID handleId); END_INTERFACE } ICorProfilerCallback2Vtbl; interface ICorProfilerCallback2 { CONST_VTBL struct ICorProfilerCallback2Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback2_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback2_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback2_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback2_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback2_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback2_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback2_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback2_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback2_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback2_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback2_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> 
AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback2_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback2_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback2_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback2_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback2_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback2_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback2_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback2_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback2_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback2_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback2_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback2_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback2_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback2_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback2_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback2_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback2_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback2_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback2_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback2_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback2_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback2_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback2_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback2_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define 
ICorProfilerCallback2_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback2_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback2_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback2_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback2_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback2_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback2_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback2_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback2_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback2_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback2_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback2_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback2_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback2_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback2_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback2_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback2_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback2_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback2_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback2_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback2_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback2_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback2_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> 
ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback2_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback2_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback2_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback2_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback2_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback2_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback2_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback2_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback2_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback2_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback2_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback2_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback2_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback2_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback2_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback2_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback2_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback2_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback2_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerCallback2_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerCallback3_INTERFACE_DEFINED__ #define __ICorProfilerCallback3_INTERFACE_DEFINED__ /* interface ICorProfilerCallback3 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback3; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("4FD2ED52-7731-4b8d-9469-03D2CC3086C5") ICorProfilerCallback3 : public ICorProfilerCallback2 { 
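/* Editorial note, not part of the MIDL-generated text: ICorProfilerCallback3
   adds the attach/detach protocol. InitializeForAttach is invoked instead of
   Initialize when the profiler is loaded into an already-running process
   (e.g. via ICLRProfiling::AttachProfiler), and pvClientData is the opaque
   blob supplied by the attach trigger. A minimal sketch, assuming a
   hypothetical MyProfiler class with an m_pInfo member:

   HRESULT STDMETHODCALLTYPE MyProfiler::InitializeForAttach(
       IUnknown *pCorProfilerInfoUnk, void *pvClientData, UINT cbClientData)
   {
       // Only event masks documented as attach-capable may be requested here.
       return pCorProfilerInfoUnk->QueryInterface(
           __uuidof(ICorProfilerInfo3), (void **)&m_pInfo);   // hypothetical member
   }
*/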
public: virtual HRESULT STDMETHODCALLTYPE InitializeForAttach( /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData) = 0; virtual HRESULT STDMETHODCALLTYPE ProfilerAttachComplete( void) = 0; virtual HRESULT STDMETHODCALLTYPE ProfilerDetachSucceeded( void) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback3Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback3 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback3 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback3 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback3 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback3 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback3 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback3 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, 
/* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback3 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback3 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback3 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback3 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback3 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback3 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback3 * This, /* [in] */ ULONG 
cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback3 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback3 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback3 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback3 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback3 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback3 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback3 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback3 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback3 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback3 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback3 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback3 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback3 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback3 * This, /* 
[in] */ ULONG cRootRefs,
        /* [size_is][in] */ ObjectID rootRefIds[ ],
        /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ],
        /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ],
        /* [size_is][in] */ UINT_PTR rootIds[ ]);

    HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback3 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId);
    HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback3 * This, /* [in] */ GCHandleID handleId);
    HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback3 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData);
    HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback3 * This);
    HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback3 * This);

    END_INTERFACE
} ICorProfilerCallback3Vtbl;

interface ICorProfilerCallback3
{
    CONST_VTBL struct ICorProfilerCallback3Vtbl *lpVtbl;
};

#ifdef COBJMACROS

#define ICorProfilerCallback3_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerCallback3_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerCallback3_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerCallback3_Initialize(This,pICorProfilerInfoUnk) \
    ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )
#define ICorProfilerCallback3_Shutdown(This) \
    ( (This)->lpVtbl -> Shutdown(This) )
#define ICorProfilerCallback3_AppDomainCreationStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )
#define ICorProfilerCallback3_AppDomainCreationFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback3_AppDomainShutdownStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )
#define ICorProfilerCallback3_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback3_AssemblyLoadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )
#define ICorProfilerCallback3_AssemblyLoadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback3_AssemblyUnloadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )
#define ICorProfilerCallback3_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback3_ModuleLoadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )
#define ICorProfilerCallback3_ModuleLoadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback3_ModuleUnloadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) )
#define ICorProfilerCallback3_ModuleUnloadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback3_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \
    ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) )
#define ICorProfilerCallback3_ClassLoadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassLoadStarted(This,classId) )
#define ICorProfilerCallback3_ClassLoadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback3_ClassUnloadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) )
#define ICorProfilerCallback3_ClassUnloadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback3_FunctionUnloadStarted(This,functionId) \
    ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) )
#define ICorProfilerCallback3_JITCompilationStarted(This,functionId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) )
#define ICorProfilerCallback3_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback3_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) )
#define ICorProfilerCallback3_JITCachedFunctionSearchFinished(This,functionId,result) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) )
#define ICorProfilerCallback3_JITFunctionPitched(This,functionId) \
    ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) )
#define ICorProfilerCallback3_JITInlining(This,callerId,calleeId,pfShouldInline) \
    ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) )
#define ICorProfilerCallback3_ThreadCreated(This,threadId) \
    ( (This)->lpVtbl -> ThreadCreated(This,threadId) )
#define ICorProfilerCallback3_ThreadDestroyed(This,threadId) \
    ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) )
#define ICorProfilerCallback3_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \
    ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) )
#define ICorProfilerCallback3_RemotingClientInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) )
#define ICorProfilerCallback3_RemotingClientSendingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback3_RemotingClientReceivingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback3_RemotingClientInvocationFinished(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) )
#define ICorProfilerCallback3_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback3_RemotingServerInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) )
#define ICorProfilerCallback3_RemotingServerInvocationReturned(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) )
#define ICorProfilerCallback3_RemotingServerSendingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback3_UnmanagedToManagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) )
#define ICorProfilerCallback3_ManagedToUnmanagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) )
#define ICorProfilerCallback3_RuntimeSuspendStarted(This,suspendReason) \
    ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) )
#define ICorProfilerCallback3_RuntimeSuspendFinished(This) \
    ( (This)->lpVtbl -> RuntimeSuspendFinished(This) )
#define ICorProfilerCallback3_RuntimeSuspendAborted(This) \
    ( (This)->lpVtbl -> RuntimeSuspendAborted(This) )
#define ICorProfilerCallback3_RuntimeResumeStarted(This) \
    ( (This)->lpVtbl -> RuntimeResumeStarted(This) )
#define ICorProfilerCallback3_RuntimeResumeFinished(This) \
    ( (This)->lpVtbl -> RuntimeResumeFinished(This) )
#define ICorProfilerCallback3_RuntimeThreadSuspended(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) )
#define ICorProfilerCallback3_RuntimeThreadResumed(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) )
#define ICorProfilerCallback3_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback3_ObjectAllocated(This,objectId,classId) \
    ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) )
#define ICorProfilerCallback3_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \
    ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) )
#define ICorProfilerCallback3_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \
    ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) )
#define ICorProfilerCallback3_RootReferences(This,cRootRefs,rootRefIds) \
    ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) )
#define ICorProfilerCallback3_ExceptionThrown(This,thrownObjectId) \
    ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) )
#define ICorProfilerCallback3_ExceptionSearchFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) )
#define ICorProfilerCallback3_ExceptionSearchFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) )
#define ICorProfilerCallback3_ExceptionSearchFilterEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) )
#define ICorProfilerCallback3_ExceptionSearchFilterLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) )
#define ICorProfilerCallback3_ExceptionSearchCatcherFound(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) )
#define ICorProfilerCallback3_ExceptionOSHandlerEnter(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) )
#define ICorProfilerCallback3_ExceptionOSHandlerLeave(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )
#define ICorProfilerCallback3_ExceptionUnwindFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )
#define ICorProfilerCallback3_ExceptionUnwindFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )
#define ICorProfilerCallback3_ExceptionUnwindFinallyEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )
#define ICorProfilerCallback3_ExceptionUnwindFinallyLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )
#define ICorProfilerCallback3_ExceptionCatcherEnter(This,functionId,objectId) \
    ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )
#define ICorProfilerCallback3_ExceptionCatcherLeave(This) \
    ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )
#define ICorProfilerCallback3_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
    ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )
#define ICorProfilerCallback3_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
    ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )
#define ICorProfilerCallback3_ExceptionCLRCatcherFound(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )
#define ICorProfilerCallback3_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )
#define ICorProfilerCallback3_ThreadNameChanged(This,threadId,cchName,name) \
    ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) )
#define ICorProfilerCallback3_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
    ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) )
#define ICorProfilerCallback3_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback3_GarbageCollectionFinished(This) \
    ( (This)->lpVtbl -> GarbageCollectionFinished(This) )
#define ICorProfilerCallback3_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
    ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) )
#define ICorProfilerCallback3_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
    ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) )
#define ICorProfilerCallback3_HandleCreated(This,handleId,initialObjectId) \
    ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) )
#define ICorProfilerCallback3_HandleDestroyed(This,handleId) \
    ( (This)->lpVtbl -> HandleDestroyed(This,handleId) )
#define ICorProfilerCallback3_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
    ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) )
#define ICorProfilerCallback3_ProfilerAttachComplete(This) \
    ( (This)->lpVtbl -> ProfilerAttachComplete(This) )
#define ICorProfilerCallback3_ProfilerDetachSucceeded(This) \
    ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) )

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerCallback3_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerCallback4_INTERFACE_DEFINED__
#define __ICorProfilerCallback4_INTERFACE_DEFINED__

/* interface ICorProfilerCallback4 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback4;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("7B63B2E3-107D-4d48-B2F6-F61E229470D2")
    ICorProfilerCallback4 : public ICorProfilerCallback3
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE ReJITCompilationStarted( /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock) = 0;
        virtual HRESULT STDMETHODCALLTYPE GetReJITParameters( /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl) = 0;
        virtual HRESULT STDMETHODCALLTYPE ReJITCompilationFinished( /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock) = 0;
        virtual HRESULT STDMETHODCALLTYPE ReJITError( /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus) = 0;
        virtual HRESULT STDMETHODCALLTYPE MovedReferences2( /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID
newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]) = 0;
        virtual HRESULT STDMETHODCALLTYPE SurvivingReferences2( /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]) = 0;
    };

#else 	/* C style interface */

typedef struct ICorProfilerCallback4Vtbl
{
    BEGIN_INTERFACE

    HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback4 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
    ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback4 * This);
    ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback4 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk);
    HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId);
    HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId);
    HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback4 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId);
    HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId);
    HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback4 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId);
    HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId);
    HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId);
    HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId);
    HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId);
    HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback4 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction);
    HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result);
    HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback4 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline);
    HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback4 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback4 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
    HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
    HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback4 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
    HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback4 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
    HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback4 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId);
    HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback4 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]);
    HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback4 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]);
    HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback4 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]);
    HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback4 * This, /* [in] */ ObjectID thrownObjectId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback4 * This, /* [in] */ UINT_PTR __unused);
    HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback4 * This, /* [in] */ UINT_PTR __unused);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback4 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots);
    HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback4 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback4 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]);
    HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback4 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason);
    HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback4 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
    HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback4 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID);
    HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback4 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]);
    HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback4 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId);
    HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback4 * This, /* [in] */ GCHandleID handleId);
    HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback4 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData);
    HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback4 * This);
    HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl);
    HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback4 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback4 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
    HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback4 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);

    END_INTERFACE
} ICorProfilerCallback4Vtbl;

interface ICorProfilerCallback4
{
    CONST_VTBL struct ICorProfilerCallback4Vtbl *lpVtbl;
};

#ifdef COBJMACROS

#define ICorProfilerCallback4_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerCallback4_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerCallback4_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerCallback4_Initialize(This,pICorProfilerInfoUnk) \
    ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )
#define ICorProfilerCallback4_Shutdown(This) \
    ( (This)->lpVtbl -> Shutdown(This) )
#define ICorProfilerCallback4_AppDomainCreationStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )
#define ICorProfilerCallback4_AppDomainCreationFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback4_AppDomainShutdownStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )
#define ICorProfilerCallback4_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback4_AssemblyLoadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )
#define \
ICorProfilerCallback4_AssemblyLoadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback4_AssemblyUnloadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )
#define ICorProfilerCallback4_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback4_ModuleLoadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )
#define ICorProfilerCallback4_ModuleLoadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback4_ModuleUnloadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) )
#define ICorProfilerCallback4_ModuleUnloadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback4_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \
    ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) )
#define ICorProfilerCallback4_ClassLoadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassLoadStarted(This,classId) )
#define ICorProfilerCallback4_ClassLoadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback4_ClassUnloadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) )
#define ICorProfilerCallback4_ClassUnloadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback4_FunctionUnloadStarted(This,functionId) \
    ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) )
#define ICorProfilerCallback4_JITCompilationStarted(This,functionId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) )
#define ICorProfilerCallback4_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback4_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) )
#define ICorProfilerCallback4_JITCachedFunctionSearchFinished(This,functionId,result) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) )
#define ICorProfilerCallback4_JITFunctionPitched(This,functionId) \
    ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) )
#define ICorProfilerCallback4_JITInlining(This,callerId,calleeId,pfShouldInline) \
    ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) )
#define ICorProfilerCallback4_ThreadCreated(This,threadId) \
    ( (This)->lpVtbl -> ThreadCreated(This,threadId) )
#define ICorProfilerCallback4_ThreadDestroyed(This,threadId) \
    ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) )
#define ICorProfilerCallback4_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \
    ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) )
#define ICorProfilerCallback4_RemotingClientInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) )
#define ICorProfilerCallback4_RemotingClientSendingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback4_RemotingClientReceivingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback4_RemotingClientInvocationFinished(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) )
#define ICorProfilerCallback4_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback4_RemotingServerInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) )
#define ICorProfilerCallback4_RemotingServerInvocationReturned(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) )
#define ICorProfilerCallback4_RemotingServerSendingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback4_UnmanagedToManagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) )
#define ICorProfilerCallback4_ManagedToUnmanagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) )
#define ICorProfilerCallback4_RuntimeSuspendStarted(This,suspendReason) \
    ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) )
#define ICorProfilerCallback4_RuntimeSuspendFinished(This) \
    ( (This)->lpVtbl -> RuntimeSuspendFinished(This) )
#define ICorProfilerCallback4_RuntimeSuspendAborted(This) \
    ( (This)->lpVtbl -> RuntimeSuspendAborted(This) )
#define ICorProfilerCallback4_RuntimeResumeStarted(This) \
    ( (This)->lpVtbl -> RuntimeResumeStarted(This) )
#define ICorProfilerCallback4_RuntimeResumeFinished(This) \
    ( (This)->lpVtbl -> RuntimeResumeFinished(This) )
#define ICorProfilerCallback4_RuntimeThreadSuspended(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) )
#define ICorProfilerCallback4_RuntimeThreadResumed(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) )
#define ICorProfilerCallback4_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback4_ObjectAllocated(This,objectId,classId) \
    ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) )
#define ICorProfilerCallback4_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \
    ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) )
#define ICorProfilerCallback4_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \
    ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) )
#define ICorProfilerCallback4_RootReferences(This,cRootRefs,rootRefIds) \
    ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) )
#define ICorProfilerCallback4_ExceptionThrown(This,thrownObjectId) \
    ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) )
#define ICorProfilerCallback4_ExceptionSearchFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) )
#define ICorProfilerCallback4_ExceptionSearchFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) )
#define ICorProfilerCallback4_ExceptionSearchFilterEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) )
#define ICorProfilerCallback4_ExceptionSearchFilterLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) )
#define ICorProfilerCallback4_ExceptionSearchCatcherFound(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) )
#define ICorProfilerCallback4_ExceptionOSHandlerEnter(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) )
#define ICorProfilerCallback4_ExceptionOSHandlerLeave(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )
#define ICorProfilerCallback4_ExceptionUnwindFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )
#define ICorProfilerCallback4_ExceptionUnwindFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )
#define ICorProfilerCallback4_ExceptionUnwindFinallyEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )
#define ICorProfilerCallback4_ExceptionUnwindFinallyLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )
#define ICorProfilerCallback4_ExceptionCatcherEnter(This,functionId,objectId) \
    ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )
#define ICorProfilerCallback4_ExceptionCatcherLeave(This) \
    ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )
#define ICorProfilerCallback4_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
    ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )
#define ICorProfilerCallback4_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
    ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )
#define ICorProfilerCallback4_ExceptionCLRCatcherFound(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )
#define ICorProfilerCallback4_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )
#define ICorProfilerCallback4_ThreadNameChanged(This,threadId,cchName,name) \
    ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) )
#define ICorProfilerCallback4_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
    ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) )
#define ICorProfilerCallback4_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback4_GarbageCollectionFinished(This) \
    ( (This)->lpVtbl -> GarbageCollectionFinished(This) )
#define ICorProfilerCallback4_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
    ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) )
#define ICorProfilerCallback4_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
    ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) )
#define ICorProfilerCallback4_HandleCreated(This,handleId,initialObjectId) \
    ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) )
#define ICorProfilerCallback4_HandleDestroyed(This,handleId) \
    ( (This)->lpVtbl -> HandleDestroyed(This,handleId) )
#define ICorProfilerCallback4_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
    ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) )
#define ICorProfilerCallback4_ProfilerAttachComplete(This) \
    ( (This)->lpVtbl -> ProfilerAttachComplete(This) )
#define ICorProfilerCallback4_ProfilerDetachSucceeded(This) \
    ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) )
#define ICorProfilerCallback4_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) )
#define ICorProfilerCallback4_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
    ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) )
#define ICorProfilerCallback4_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback4_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )
#define ICorProfilerCallback4_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback4_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerCallback4_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerCallback5_INTERFACE_DEFINED__
#define __ICorProfilerCallback5_INTERFACE_DEFINED__

/* interface ICorProfilerCallback5 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback5;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("8DFBA405-8C9F-45F8-BFFA-83B14CEF78B5")
    ICorProfilerCallback5 : public ICorProfilerCallback4
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE ConditionalWeakTableElementReferences( /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]) = 0;
    };

#else 	/* C style interface */

typedef struct ICorProfilerCallback5Vtbl
{
    BEGIN_INTERFACE

    HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback5 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
    ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback5 * This);
    ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback5 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk);
    HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback5 * This, /* [in] */ AppDomainID appDomainId);
    HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback5 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback5 * This, /* [in] */ AppDomainID appDomainId);
    HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback5 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback5 * This, /* [in] */ AssemblyID assemblyId);
    HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback5 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback5 * This, /* [in] */ AssemblyID assemblyId);
    HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )(
        ICorProfilerCallback5 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId);
    HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId);
    HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId);
    HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId);
    HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId);
    HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback5 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction);
    HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result);
    HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback5 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline);
    HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback5 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback5 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
    HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
    HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
    HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback5 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
    HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId);
    HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
    HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback5 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId);
    HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback5 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]);
    HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback5 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]);
    HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]);
    HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback5 * This, /* [in] */ ObjectID thrownObjectId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback5 * This, /* [in] */ UINT_PTR __unused);
    HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback5 * This, /* [in] */ UINT_PTR __unused);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback5 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots);
    HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback5 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback5 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]);
    HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback5 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason);
    HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
    HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback5 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID);
    HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback5 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]);
    HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback5 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId);
    HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback5 * This, /* [in] */ GCHandleID handleId);
    HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback5 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData);
    HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback5 * This);
    HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl);
    HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback5 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
    HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus);
    HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback5 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
    HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback5 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
    HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback5 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]);

    END_INTERFACE
} ICorProfilerCallback5Vtbl;

interface ICorProfilerCallback5
{
    CONST_VTBL struct ICorProfilerCallback5Vtbl *lpVtbl;
};

#ifdef COBJMACROS

#define ICorProfilerCallback5_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerCallback5_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerCallback5_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerCallback5_Initialize(This,pICorProfilerInfoUnk) \
    ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )
#define ICorProfilerCallback5_Shutdown(This) \
    ( (This)->lpVtbl -> Shutdown(This) )
#define ICorProfilerCallback5_AppDomainCreationStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )
#define ICorProfilerCallback5_AppDomainCreationFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback5_AppDomainShutdownStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )
#define ICorProfilerCallback5_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback5_AssemblyLoadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )
#define ICorProfilerCallback5_AssemblyLoadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback5_AssemblyUnloadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )
#define ICorProfilerCallback5_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback5_ModuleLoadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )
#define ICorProfilerCallback5_ModuleLoadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback5_ModuleUnloadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) )
#define ICorProfilerCallback5_ModuleUnloadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback5_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \
    ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) )
#define ICorProfilerCallback5_ClassLoadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassLoadStarted(This,classId) )
#define ICorProfilerCallback5_ClassLoadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback5_ClassUnloadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) )
#define ICorProfilerCallback5_ClassUnloadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback5_FunctionUnloadStarted(This,functionId) \
    ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) )
#define ICorProfilerCallback5_JITCompilationStarted(This,functionId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) )
#define ICorProfilerCallback5_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback5_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) )
#define ICorProfilerCallback5_JITCachedFunctionSearchFinished(This,functionId,result) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) )
#define ICorProfilerCallback5_JITFunctionPitched(This,functionId) \
    ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) )
#define ICorProfilerCallback5_JITInlining(This,callerId,calleeId,pfShouldInline) \
    ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) )
#define ICorProfilerCallback5_ThreadCreated(This,threadId) \
    ( (This)->lpVtbl -> ThreadCreated(This,threadId) )
#define ICorProfilerCallback5_ThreadDestroyed(This,threadId) \
    ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) )
#define ICorProfilerCallback5_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \
    ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) )
#define ICorProfilerCallback5_RemotingClientInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) )
#define ICorProfilerCallback5_RemotingClientSendingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback5_RemotingClientReceivingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback5_RemotingClientInvocationFinished(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) )
#define ICorProfilerCallback5_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback5_RemotingServerInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) )
#define ICorProfilerCallback5_RemotingServerInvocationReturned(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) )
#define ICorProfilerCallback5_RemotingServerSendingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback5_UnmanagedToManagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) )
#define ICorProfilerCallback5_ManagedToUnmanagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) )
#define ICorProfilerCallback5_RuntimeSuspendStarted(This,suspendReason) \
    ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) )
#define ICorProfilerCallback5_RuntimeSuspendFinished(This) \
    ( (This)->lpVtbl -> RuntimeSuspendFinished(This) )
#define ICorProfilerCallback5_RuntimeSuspendAborted(This) \
    ( (This)->lpVtbl -> RuntimeSuspendAborted(This) )
#define ICorProfilerCallback5_RuntimeResumeStarted(This) \
    ( (This)->lpVtbl -> RuntimeResumeStarted(This) )
#define ICorProfilerCallback5_RuntimeResumeFinished(This) \
    ( (This)->lpVtbl -> RuntimeResumeFinished(This) )
#define ICorProfilerCallback5_RuntimeThreadSuspended(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) )
#define ICorProfilerCallback5_RuntimeThreadResumed(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) )
#define ICorProfilerCallback5_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback5_ObjectAllocated(This,objectId,classId) \
    ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) )
#define ICorProfilerCallback5_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \
    ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) )
#define ICorProfilerCallback5_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \
    ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) )
#define ICorProfilerCallback5_RootReferences(This,cRootRefs,rootRefIds) \
    ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) )
#define ICorProfilerCallback5_ExceptionThrown(This,thrownObjectId) \
    ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) )
#define ICorProfilerCallback5_ExceptionSearchFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) )
#define ICorProfilerCallback5_ExceptionSearchFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) )
#define ICorProfilerCallback5_ExceptionSearchFilterEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) )
#define ICorProfilerCallback5_ExceptionSearchFilterLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) )
#define ICorProfilerCallback5_ExceptionSearchCatcherFound(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) )
#define ICorProfilerCallback5_ExceptionOSHandlerEnter(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) )
#define ICorProfilerCallback5_ExceptionOSHandlerLeave(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )
#define ICorProfilerCallback5_ExceptionUnwindFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )
#define ICorProfilerCallback5_ExceptionUnwindFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )
#define ICorProfilerCallback5_ExceptionUnwindFinallyEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )
#define ICorProfilerCallback5_ExceptionUnwindFinallyLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )
#define ICorProfilerCallback5_ExceptionCatcherEnter(This,functionId,objectId) \
    ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )
#define ICorProfilerCallback5_ExceptionCatcherLeave(This) \
    ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )
#define ICorProfilerCallback5_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
    ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )
#define ICorProfilerCallback5_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
    ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )
#define ICorProfilerCallback5_ExceptionCLRCatcherFound(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )
#define ICorProfilerCallback5_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )
#define ICorProfilerCallback5_ThreadNameChanged(This,threadId,cchName,name) \
    ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) )
#define ICorProfilerCallback5_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
    ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) )
#define ICorProfilerCallback5_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback5_GarbageCollectionFinished(This) \
    ( (This)->lpVtbl -> GarbageCollectionFinished(This) )
#define ICorProfilerCallback5_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
    ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) )
#define ICorProfilerCallback5_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
    ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) )
#define ICorProfilerCallback5_HandleCreated(This,handleId,initialObjectId) \
    ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) )
#define ICorProfilerCallback5_HandleDestroyed(This,handleId) \
    ( (This)->lpVtbl -> HandleDestroyed(This,handleId) )
#define ICorProfilerCallback5_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
    ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) )
#define ICorProfilerCallback5_ProfilerAttachComplete(This) \
    ( (This)->lpVtbl -> ProfilerAttachComplete(This) )
#define ICorProfilerCallback5_ProfilerDetachSucceeded(This) \
    ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) )
#define ICorProfilerCallback5_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) )
#define ICorProfilerCallback5_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
    ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) )
#define ICorProfilerCallback5_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback5_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )
#define ICorProfilerCallback5_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback5_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback5_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
    ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerCallback5_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerCallback6_INTERFACE_DEFINED__
#define \
__ICorProfilerCallback6_INTERFACE_DEFINED__ /* interface ICorProfilerCallback6 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback6; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("FC13DF4B-4448-4F4F-950C-BA8D19D00C36") ICorProfilerCallback6 : public ICorProfilerCallback5 { public: virtual HRESULT STDMETHODCALLTYPE GetAssemblyReferences( /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback6Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback6 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback6 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback6 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback6 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback6 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback6 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback6 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback6 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback6 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback6 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback6 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback6 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback6 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback6 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback6 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback6 * This, /* [in] */ 
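/*
 * Note: ICorProfilerCallback6 (declared above in both the C++ and the
 * C-style form) extends ICorProfilerCallback5 with a single method,
 * GetAssemblyReferences. The runtime raises it while computing an
 * assembly's reference closure, provided the profiler requested
 * COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES in its high event mask, so that a
 * profiler that will IL-rewrite code against an additional assembly can
 * declare that dependency up front. A minimal sketch of a handler,
 * assuming a hypothetical C++ profiler class MyProfiler (not part of
 * this header):
 *
 *   HRESULT MyProfiler::GetAssemblyReferences(
 *       const WCHAR *wszAssemblyPath,
 *       ICorProfilerAssemblyReferenceProvider *pAsmRefProvider)
 *   {
 *       COR_PRF_ASSEMBLY_REFERENCE_INFO info = {};
 *       // Populate info with the identity (name, version, public key
 *       // token) of the assembly the rewritten IL will reference, then
 *       // register it with the provider supplied by the runtime.
 *       return pAsmRefProvider->AddAssemblyReference(&info);
 *   }
 */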
FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback6 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback6 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback6 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback6 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE 
*ObjectAllocated )( ICorProfilerCallback6 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback6 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback6 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback6 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback6 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback6 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback6 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback6 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback6 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback6 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback6 * This); HRESULT ( 
STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback6 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback6 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback6 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback6 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback6 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback6 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback6 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback6 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback6 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback6 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback6 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); END_INTERFACE } ICorProfilerCallback6Vtbl; interface ICorProfilerCallback6 { CONST_VTBL struct ICorProfilerCallback6Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback6_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback6_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback6_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback6_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback6_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback6_AppDomainCreationStarted(This,appDomainId) 
\ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback6_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback6_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback6_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback6_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback6_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback6_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback6_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback6_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback6_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback6_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback6_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback6_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback6_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback6_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback6_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback6_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback6_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback6_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback6_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback6_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback6_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback6_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback6_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback6_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define 
ICorProfilerCallback6_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback6_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback6_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback6_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback6_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback6_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback6_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback6_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback6_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback6_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback6_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback6_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback6_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback6_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback6_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback6_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback6_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback6_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback6_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback6_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback6_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback6_ExceptionThrown(This,thrownObjectId) \ 
( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback6_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback6_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback6_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback6_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback6_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback6_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback6_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback6_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback6_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback6_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback6_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback6_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback6_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback6_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback6_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback6_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback6_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback6_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback6_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback6_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define 
ICorProfilerCallback6_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback6_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback6_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback6_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback6_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback6_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback6_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback6_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback6_ReJITError(This,moduleId,methodId,functionId,hrStatus) \ ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) ) #define ICorProfilerCallback6_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback6_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback6_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \ ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) ) #define ICorProfilerCallback6_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \ ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerCallback6_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerCallback7_INTERFACE_DEFINED__ #define __ICorProfilerCallback7_INTERFACE_DEFINED__ /* interface ICorProfilerCallback7 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback7; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("F76A2DBA-1D52-4539-866C-2AA518F9EFC3") ICorProfilerCallback7 : public ICorProfilerCallback6 { public: virtual HRESULT STDMETHODCALLTYPE ModuleInMemorySymbolsUpdated( ModuleID moduleId) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback7Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback7 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback7 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback7 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( 
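/*
 * Note: ICorProfilerCallback7 (declared above) adds one method over
 * ICorProfilerCallback6: ModuleInMemorySymbolsUpdated, raised when the
 * in-memory symbol stream of a module (for example, a reflection-emit
 * assembly built with symbol support) is updated. A handler typically
 * re-reads the symbols through ICorProfilerInfo7; the sketch below is
 * illustrative only, with m_pInfo7 assumed to be a cached
 * ICorProfilerInfo7 pointer held by a hypothetical MyProfiler class:
 *
 *   HRESULT MyProfiler::ModuleInMemorySymbolsUpdated(ModuleID moduleId)
 *   {
 *       DWORD cbSymbols = 0;
 *       if (SUCCEEDED(m_pInfo7->GetInMemorySymbolsLength(moduleId, &cbSymbols))
 *           && cbSymbols != 0)
 *       {
 *           // Allocate cbSymbols bytes and call ReadInMemorySymbols to
 *           // refresh any cached debug information for this module.
 *       }
 *       return S_OK;
 *   }
 */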
ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback7 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback7 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback7 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback7 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback7 
* This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback7 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback7 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback7 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback7 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback7 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback7 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE 
*ExceptionSearchFilterLeave )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback7 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback7 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback7 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback7 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback7 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback7 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback7 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback7 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback7 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback7 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback7 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback7 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback7 * This, /* [in] */ 
FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback7 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback7 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback7 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback7 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback7 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback7 * This, ModuleID moduleId); END_INTERFACE } ICorProfilerCallback7Vtbl; interface ICorProfilerCallback7 { CONST_VTBL struct ICorProfilerCallback7Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback7_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback7_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback7_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback7_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback7_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback7_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback7_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback7_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback7_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback7_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback7_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback7_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback7_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> 
AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback7_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback7_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback7_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback7_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback7_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback7_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback7_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback7_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback7_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback7_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback7_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback7_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback7_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback7_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback7_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback7_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback7_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback7_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback7_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback7_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback7_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback7_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback7_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback7_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define 
ICorProfilerCallback7_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback7_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback7_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback7_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback7_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback7_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback7_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback7_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback7_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback7_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback7_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback7_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback7_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback7_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback7_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback7_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback7_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback7_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback7_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback7_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback7_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback7_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback7_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback7_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback7_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define 
ICorProfilerCallback7_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback7_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback7_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback7_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback7_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback7_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback7_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback7_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback7_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback7_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback7_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback7_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback7_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback7_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback7_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback7_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback7_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback7_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback7_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback7_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback7_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback7_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback7_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define 
ICorProfilerCallback7_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback7_ReJITError(This,moduleId,methodId,functionId,hrStatus) \ ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) ) #define ICorProfilerCallback7_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback7_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback7_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \ ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) ) #define ICorProfilerCallback7_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \ ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) ) #define ICorProfilerCallback7_ModuleInMemorySymbolsUpdated(This,moduleId) \ ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerCallback7_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerCallback8_INTERFACE_DEFINED__ #define __ICorProfilerCallback8_INTERFACE_DEFINED__ /* interface ICorProfilerCallback8 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback8; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("5BED9B15-C079-4D47-BFE2-215A140C07E0") ICorProfilerCallback8 : public ICorProfilerCallback7 { public: virtual HRESULT STDMETHODCALLTYPE DynamicMethodJITCompilationStarted( /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader) = 0; virtual HRESULT STDMETHODCALLTYPE DynamicMethodJITCompilationFinished( /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback8Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback8 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback8 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback8 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback8 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( 
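/*
 * Note: ICorProfilerCallback8 (declared above) adds the
 * DynamicMethodJITCompilationStarted / DynamicMethodJITCompilationFinished
 * pair. These mirror JITCompilationStarted / JITCompilationFinished but
 * fire for methods that have no metadata token, such as
 * System.Reflection.Emit.DynamicMethod bodies and runtime-generated IL
 * stubs. The pILHeader buffer is valid only for the duration of the
 * Started callback, so its cbILHeader bytes must be copied if they are
 * needed later. Illustrative handler, assuming a hypothetical MyProfiler
 * class and the availability of <vector>:
 *
 *   HRESULT MyProfiler::DynamicMethodJITCompilationStarted(
 *       FunctionID functionId, BOOL fIsSafeToBlock,
 *       LPCBYTE pILHeader, ULONG cbILHeader)
 *   {
 *       // Copy the IL now; the buffer is not valid after this returns.
 *       std::vector<BYTE> il(pILHeader, pILHeader + cbILHeader);
 *       // ... record il against functionId for later inspection ...
 *       return S_OK;
 *   }
 */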
STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback8 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback8 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback8 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback8 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( 
ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback8 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback8 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback8 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback8 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback8 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback8 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback8 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback8 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback8 
* This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback8 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback8 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback8 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback8 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback8 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback8 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback8 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback8 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback8 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback8 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* 
[in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback8 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback8 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback8 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback8 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback8 * This, ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )( ICorProfilerCallback8 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); END_INTERFACE } ICorProfilerCallback8Vtbl; interface ICorProfilerCallback8 { CONST_VTBL struct ICorProfilerCallback8Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback8_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback8_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback8_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback8_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback8_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback8_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback8_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback8_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback8_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback8_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback8_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback8_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback8_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback8_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define 
ICorProfilerCallback8_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback8_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback8_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback8_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback8_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback8_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback8_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback8_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback8_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback8_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback8_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback8_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback8_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback8_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback8_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback8_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback8_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback8_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback8_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback8_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback8_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback8_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> 
RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback8_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback8_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback8_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback8_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback8_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback8_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback8_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback8_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback8_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback8_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback8_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback8_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback8_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback8_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback8_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback8_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback8_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback8_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback8_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback8_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback8_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback8_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> 
ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback8_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback8_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback8_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback8_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback8_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback8_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback8_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback8_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback8_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback8_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback8_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback8_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback8_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback8_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback8_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback8_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback8_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback8_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback8_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback8_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback8_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback8_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define 
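/*
 * Illustrative sketch (not part of the MIDL-generated output): ICorProfilerCallback8
 * adds the two DynamicMethodJITCompilation* callbacks so a profiler can observe JIT
 * events for IL that has no backing metadata (e.g. System.Reflection.Emit dynamic
 * methods). `MyProfiler` is a hypothetical class implementing the callback
 * interfaces; the sketch is fenced with #if 0 so it cannot affect header consumers.
 */
#if 0
HRESULT STDMETHODCALLTYPE MyProfiler::DynamicMethodJITCompilationStarted(
    FunctionID functionId,
    BOOL fIsSafeToBlock,
    LPCBYTE pILHeader,
    ULONG cbILHeader)
{
    // The IL header buffer is runtime-owned; the usual callback buffer rules
    // suggest copying it here if it is needed after the callback returns.
    return S_OK;
}

HRESULT STDMETHODCALLTYPE MyProfiler::DynamicMethodJITCompilationFinished(
    FunctionID functionId,
    HRESULT hrStatus,
    BOOL fIsSafeToBlock)
{
    // hrStatus reports whether JIT compilation of the dynamic method succeeded.
    return S_OK;
}
#endif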
#ifndef __ICorProfilerCallback9_INTERFACE_DEFINED__
#define __ICorProfilerCallback9_INTERFACE_DEFINED__

/* interface ICorProfilerCallback9 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback9;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("27583EC3-C8F5-482F-8052-194B8CE4705A")
    ICorProfilerCallback9 : public ICorProfilerCallback8
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE DynamicMethodUnloaded(
            /* [in] */ FunctionID functionId) = 0;
    };

#else 	/* C style interface */

    typedef struct ICorProfilerCallback9Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback9 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback9 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback9 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk);
        HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId);
        HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback9 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId);
        HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback9 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback9 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction);
        HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result);
        HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback9 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline);
        HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback9 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback9 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback9 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback9 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback9 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync);
        HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback9 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId);
        HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback9 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId);
        HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback9 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]);
        HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback9 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback9 * This, /* [in] */ ObjectID thrownObjectId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback9 * This, /* [in] */ UINT_PTR __unused);
        HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback9 * This, /* [in] */ UINT_PTR __unused);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback9 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots);
        HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback9 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )( ICorProfilerCallback9 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]);
        HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback9 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason);
        HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback9 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID);
        HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback9 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback9 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId);
        HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback9 * This, /* [in] */ GCHandleID handleId);
        HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback9 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData);
        HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback9 * This);
        HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl);
        HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus);
        HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback9 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback9 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
        HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback9 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]);
        HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback9 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider);
        HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback9 * This, ModuleID moduleId);
        HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader);
        HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock);
        HRESULT ( STDMETHODCALLTYPE *DynamicMethodUnloaded )( ICorProfilerCallback9 * This, /* [in] */ FunctionID functionId);

        END_INTERFACE
    } ICorProfilerCallback9Vtbl;

    interface ICorProfilerCallback9
    {
        CONST_VTBL struct ICorProfilerCallback9Vtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerCallback9_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerCallback9_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerCallback9_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerCallback9_Initialize(This,pICorProfilerInfoUnk) \
    ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )
#define ICorProfilerCallback9_Shutdown(This) \
    ( (This)->lpVtbl -> Shutdown(This) )
#define ICorProfilerCallback9_AppDomainCreationStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )
#define ICorProfilerCallback9_AppDomainCreationFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback9_AppDomainShutdownStarted(This,appDomainId) \
    ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )
#define ICorProfilerCallback9_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
    ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )
#define ICorProfilerCallback9_AssemblyLoadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )
#define ICorProfilerCallback9_AssemblyLoadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback9_AssemblyUnloadStarted(This,assemblyId) \
    ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )
#define ICorProfilerCallback9_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
    ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )
#define ICorProfilerCallback9_ModuleLoadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )
#define ICorProfilerCallback9_ModuleLoadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback9_ModuleUnloadStarted(This,moduleId) \
    ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) )
#define ICorProfilerCallback9_ModuleUnloadFinished(This,moduleId,hrStatus) \
    ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) )
#define ICorProfilerCallback9_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \
    ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) )
#define ICorProfilerCallback9_ClassLoadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassLoadStarted(This,classId) )
#define ICorProfilerCallback9_ClassLoadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback9_ClassUnloadStarted(This,classId) \
    ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) )
#define ICorProfilerCallback9_ClassUnloadFinished(This,classId,hrStatus) \
    ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) )
#define ICorProfilerCallback9_FunctionUnloadStarted(This,functionId) \
    ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) )
#define ICorProfilerCallback9_JITCompilationStarted(This,functionId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) )
#define ICorProfilerCallback9_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback9_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) )
#define ICorProfilerCallback9_JITCachedFunctionSearchFinished(This,functionId,result) \
    ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) )
#define ICorProfilerCallback9_JITFunctionPitched(This,functionId) \
    ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) )
#define ICorProfilerCallback9_JITInlining(This,callerId,calleeId,pfShouldInline) \
    ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) )
#define ICorProfilerCallback9_ThreadCreated(This,threadId) \
    ( (This)->lpVtbl -> ThreadCreated(This,threadId) )
#define ICorProfilerCallback9_ThreadDestroyed(This,threadId) \
    ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) )
#define ICorProfilerCallback9_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \
    ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) )
#define ICorProfilerCallback9_RemotingClientInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) )
#define ICorProfilerCallback9_RemotingClientSendingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback9_RemotingClientReceivingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback9_RemotingClientInvocationFinished(This) \
    ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) )
#define ICorProfilerCallback9_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) )
#define ICorProfilerCallback9_RemotingServerInvocationStarted(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) )
#define ICorProfilerCallback9_RemotingServerInvocationReturned(This) \
    ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) )
#define ICorProfilerCallback9_RemotingServerSendingReply(This,pCookie,fIsAsync) \
    ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) )
#define ICorProfilerCallback9_UnmanagedToManagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) )
#define ICorProfilerCallback9_ManagedToUnmanagedTransition(This,functionId,reason) \
    ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) )
#define ICorProfilerCallback9_RuntimeSuspendStarted(This,suspendReason) \
    ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) )
#define ICorProfilerCallback9_RuntimeSuspendFinished(This) \
    ( (This)->lpVtbl -> RuntimeSuspendFinished(This) )
#define ICorProfilerCallback9_RuntimeSuspendAborted(This) \
    ( (This)->lpVtbl -> RuntimeSuspendAborted(This) )
#define ICorProfilerCallback9_RuntimeResumeStarted(This) \
    ( (This)->lpVtbl -> RuntimeResumeStarted(This) )
#define ICorProfilerCallback9_RuntimeResumeFinished(This) \
    ( (This)->lpVtbl -> RuntimeResumeFinished(This) )
#define ICorProfilerCallback9_RuntimeThreadSuspended(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) )
#define ICorProfilerCallback9_RuntimeThreadResumed(This,threadId) \
    ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) )
#define ICorProfilerCallback9_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback9_ObjectAllocated(This,objectId,classId) \
    ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) )
#define ICorProfilerCallback9_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \
    ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) )
#define ICorProfilerCallback9_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \
    ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) )
#define ICorProfilerCallback9_RootReferences(This,cRootRefs,rootRefIds) \
    ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) )
#define ICorProfilerCallback9_ExceptionThrown(This,thrownObjectId) \
    ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) )
#define ICorProfilerCallback9_ExceptionSearchFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) )
#define ICorProfilerCallback9_ExceptionSearchFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) )
#define ICorProfilerCallback9_ExceptionSearchFilterEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) )
#define ICorProfilerCallback9_ExceptionSearchFilterLeave(This) \
    ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) )
#define ICorProfilerCallback9_ExceptionSearchCatcherFound(This,functionId) \
    ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) )
#define ICorProfilerCallback9_ExceptionOSHandlerEnter(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) )
#define ICorProfilerCallback9_ExceptionOSHandlerLeave(This,__unused) \
    ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )
#define ICorProfilerCallback9_ExceptionUnwindFunctionEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )
#define ICorProfilerCallback9_ExceptionUnwindFunctionLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )
#define ICorProfilerCallback9_ExceptionUnwindFinallyEnter(This,functionId) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )
#define ICorProfilerCallback9_ExceptionUnwindFinallyLeave(This) \
    ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )
#define ICorProfilerCallback9_ExceptionCatcherEnter(This,functionId,objectId) \
    ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )
#define ICorProfilerCallback9_ExceptionCatcherLeave(This) \
    ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )
#define ICorProfilerCallback9_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
    ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )
#define ICorProfilerCallback9_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
    ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )
#define ICorProfilerCallback9_ExceptionCLRCatcherFound(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )
#define ICorProfilerCallback9_ExceptionCLRCatcherExecute(This) \
    ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )
#define ICorProfilerCallback9_ThreadNameChanged(This,threadId,cchName,name) \
    ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) )
#define ICorProfilerCallback9_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
    ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) )
#define ICorProfilerCallback9_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback9_GarbageCollectionFinished(This) \
    ( (This)->lpVtbl -> GarbageCollectionFinished(This) )
#define ICorProfilerCallback9_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
    ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) )
#define ICorProfilerCallback9_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
    ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) )
#define ICorProfilerCallback9_HandleCreated(This,handleId,initialObjectId) \
    ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) )
#define ICorProfilerCallback9_HandleDestroyed(This,handleId) \
    ( (This)->lpVtbl -> HandleDestroyed(This,handleId) )
#define ICorProfilerCallback9_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
    ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) )
#define ICorProfilerCallback9_ProfilerAttachComplete(This) \
    ( (This)->lpVtbl -> ProfilerAttachComplete(This) )
#define ICorProfilerCallback9_ProfilerDetachSucceeded(This) \
    ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) )
#define ICorProfilerCallback9_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) )
#define ICorProfilerCallback9_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
    ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) )
#define ICorProfilerCallback9_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback9_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
    ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )
#define ICorProfilerCallback9_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback9_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
    ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
#define ICorProfilerCallback9_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
    ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )
#define ICorProfilerCallback9_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \
    ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) )
#define ICorProfilerCallback9_ModuleInMemorySymbolsUpdated(This,moduleId) \
    ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) )
#define ICorProfilerCallback9_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) )
#define ICorProfilerCallback9_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
    ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
#define ICorProfilerCallback9_DynamicMethodUnloaded(This,functionId) \
    ( (This)->lpVtbl -> DynamicMethodUnloaded(This,functionId) )

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerCallback9_INTERFACE_DEFINED__ */
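/*
 * Illustrative sketch (not part of the MIDL-generated output): DynamicMethodUnloaded,
 * new in ICorProfilerCallback9, is the point at which profiler state keyed on a
 * dynamic method's FunctionID should be discarded, since the ID may be recycled once
 * the method is collected. `MyProfiler` and its `m_dynamicMethods` map are hypothetical.
 */
#if 0
HRESULT STDMETHODCALLTYPE MyProfiler::DynamicMethodUnloaded(FunctionID functionId)
{
    // Drop cached IL/name info so a recycled FunctionID is not mistaken
    // for the method that was just unloaded.
    m_dynamicMethods.erase(functionId);
    return S_OK;
}
#endif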
#ifndef __ICorProfilerCallback10_INTERFACE_DEFINED__
#define __ICorProfilerCallback10_INTERFACE_DEFINED__

/* interface ICorProfilerCallback10 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerCallback10;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("CEC5B60E-C69C-495F-87F6-84D28EE16FFB")
    ICorProfilerCallback10 : public ICorProfilerCallback9
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EventPipeEventDelivered(
            /* [in] */ EVENTPIPE_PROVIDER provider,
            /* [in] */ DWORD eventId,
            /* [in] */ DWORD eventVersion,
            /* [in] */ ULONG cbMetadataBlob,
            /* [size_is][in] */ LPCBYTE metadataBlob,
            /* [in] */ ULONG cbEventData,
            /* [size_is][in] */ LPCBYTE eventData,
            /* [in] */ LPCGUID pActivityId,
            /* [in] */ LPCGUID pRelatedActivityId,
            /* [in] */ ThreadID eventThread,
            /* [in] */ ULONG numStackFrames,
            /* [length_is][in] */ UINT_PTR stackFrames[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeProviderCreated(
            /* [in] */ EVENTPIPE_PROVIDER provider) = 0;
    };
*ProfilerDetachSucceeded )( ICorProfilerCallback10 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback10 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback10 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback10 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback10 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback10 * This, ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *DynamicMethodUnloaded )( ICorProfilerCallback10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *EventPipeEventDelivered )( ICorProfilerCallback10 * This, /* [in] */ EVENTPIPE_PROVIDER provider, /* [in] */ DWORD eventId, /* [in] */ DWORD eventVersion, /* [in] */ ULONG cbMetadataBlob, /* [size_is][in] */ LPCBYTE metadataBlob, /* [in] */ ULONG cbEventData, /* [size_is][in] */ LPCBYTE eventData, /* [in] */ LPCGUID pActivityId, /* [in] */ LPCGUID pRelatedActivityId, /* [in] */ ThreadID eventThread, /* [in] */ ULONG numStackFrames, /* [length_is][in] */ UINT_PTR stackFrames[ ]); HRESULT ( STDMETHODCALLTYPE *EventPipeProviderCreated )( ICorProfilerCallback10 * This, /* [in] */ EVENTPIPE_PROVIDER provider); END_INTERFACE } ICorProfilerCallback10Vtbl; interface ICorProfilerCallback10 { CONST_VTBL struct ICorProfilerCallback10Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback10_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback10_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback10_Release(This) \ ( (This)->lpVtbl 
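/*
 * Illustrative usage sketch (hand-written; not MIDL output). This COBJMACROS
 * block gives C callers method-call syntax without spelling out the vtable
 * indirection by hand: each macro expands to (This)->lpVtbl -> Method(This, ...),
 * threading the interface pointer through as the explicit first argument.
 * The helper name below is hypothetical.
 */
#if 0   /* example only; excluded from compilation */
static void AddRefCallback(ICorProfilerCallback10 *pCallback)
{
    /* Expands to: pCallback->lpVtbl->AddRef(pCallback); */
    ICorProfilerCallback10_AddRef(pCallback);
}
#endif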
-> Release(This) ) #define ICorProfilerCallback10_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback10_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback10_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback10_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback10_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback10_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback10_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback10_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback10_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback10_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback10_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback10_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback10_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback10_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback10_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback10_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback10_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback10_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback10_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback10_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback10_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback10_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback10_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback10_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback10_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> 
JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback10_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback10_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback10_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback10_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback10_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback10_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback10_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback10_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback10_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback10_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback10_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback10_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback10_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback10_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback10_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback10_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback10_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback10_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback10_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback10_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define 
ICorProfilerCallback10_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback10_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback10_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback10_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback10_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback10_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback10_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback10_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback10_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define ICorProfilerCallback10_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback10_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback10_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback10_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback10_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback10_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback10_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback10_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback10_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback10_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback10_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define 
ICorProfilerCallback10_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback10_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback10_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback10_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback10_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback10_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback10_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback10_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback10_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback10_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback10_ReJITError(This,moduleId,methodId,functionId,hrStatus) \ ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) ) #define ICorProfilerCallback10_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback10_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \ ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) ) #define ICorProfilerCallback10_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \ ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) ) #define ICorProfilerCallback10_ModuleInMemorySymbolsUpdated(This,moduleId) \ ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) ) #define ICorProfilerCallback10_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \ ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) ) #define ICorProfilerCallback10_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback10_DynamicMethodUnloaded(This,functionId) \ ( (This)->lpVtbl -> DynamicMethodUnloaded(This,functionId) ) #define 
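/*
 * Illustrative usage sketch (hand-written; not MIDL output).
 * EventPipeEventDelivered and EventPipeProviderCreated are the methods
 * ICorProfilerCallback10 adds over ICorProfilerCallback9: they hand EventPipe
 * events and provider registrations to the profiler. Delivery is opt-in
 * through the profiler's high event mask (COR_PRF_HIGH_MONITOR_EVENT_PIPE in
 * recent corprof revisions -- treat that flag name as an assumption). The
 * handler name and counter below are hypothetical.
 */
#if 0   /* example only; excluded from compilation */
static ULONG g_eventPipeEventCount = 0;

static HRESULT STDMETHODCALLTYPE MyEventPipeEventDelivered(
    ICorProfilerCallback10 *This,
    EVENTPIPE_PROVIDER provider,
    DWORD eventId,
    DWORD eventVersion,
    ULONG cbMetadataBlob,
    LPCBYTE metadataBlob,
    ULONG cbEventData,
    LPCBYTE eventData,
    LPCGUID pActivityId,
    LPCGUID pRelatedActivityId,
    ThreadID eventThread,
    ULONG numStackFrames,
    UINT_PTR stackFrames[])
{
    /* Count deliveries; a real profiler would decode the metadata blob. */
    g_eventPipeEventCount++;
    return S_OK;
}
#endif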
ICorProfilerCallback10_EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) \ ( (This)->lpVtbl -> EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) ) #define ICorProfilerCallback10_EventPipeProviderCreated(This,provider) \ ( (This)->lpVtbl -> EventPipeProviderCreated(This,provider) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerCallback10_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerCallback11_INTERFACE_DEFINED__ #define __ICorProfilerCallback11_INTERFACE_DEFINED__ /* interface ICorProfilerCallback11 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerCallback11; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("42350846-AAED-47F7-B128-FD0C98881CDE") ICorProfilerCallback11 : public ICorProfilerCallback10 { public: virtual HRESULT STDMETHODCALLTYPE LoadAsNotificationOnly( BOOL *pbNotificationOnly) = 0; }; #else /* C style interface */ typedef struct ICorProfilerCallback11Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerCallback11 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerCallback11 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *Initialize )( ICorProfilerCallback11 * This, /* [in] */ IUnknown *pICorProfilerInfoUnk); HRESULT ( STDMETHODCALLTYPE *Shutdown )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )( ICorProfilerCallback11 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )( ICorProfilerCallback11 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )( ICorProfilerCallback11 * This, /* [in] */ AppDomainID appDomainId); HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )( ICorProfilerCallback11 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )( ICorProfilerCallback11 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )( ICorProfilerCallback11 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )( ICorProfilerCallback11 * This, /* [in] */ AssemblyID assemblyId); HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )( ICorProfilerCallback11 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId, /* [in] */ AssemblyID AssemblyId); HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )( 
ICorProfilerCallback11 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )( ICorProfilerCallback11 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )( ICorProfilerCallback11 * This, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )( ICorProfilerCallback11 * This, /* [in] */ ClassID classId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *pbUseCachedFunction); HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_JIT_CACHE result); HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *JITInlining )( ICorProfilerCallback11 * This, /* [in] */ FunctionID callerId, /* [in] */ FunctionID calleeId, /* [out] */ BOOL *pfShouldInline); HRESULT ( STDMETHODCALLTYPE *ThreadCreated )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )( ICorProfilerCallback11 * This, /* [in] */ ThreadID managedThreadId, /* [in] */ DWORD osThreadId); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )( ICorProfilerCallback11 * This, /* [in] */ GUID *pCookie, /* [in] */ BOOL fIsAsync); HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_TRANSITION_REASON reason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )( ICorProfilerCallback11 * This, /* [in] */ COR_PRF_SUSPEND_REASON suspendReason); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE 
*RuntimeResumeStarted )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId); HRESULT ( STDMETHODCALLTYPE *MovedReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )( ICorProfilerCallback11 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId); HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )( ICorProfilerCallback11 * This, /* [in] */ ULONG cClassCount, /* [size_is][in] */ ClassID classIds[ ], /* [size_is][in] */ ULONG cObjects[ ]); HRESULT ( STDMETHODCALLTYPE *ObjectReferences )( ICorProfilerCallback11 * This, /* [in] */ ObjectID objectId, /* [in] */ ClassID classId, /* [in] */ ULONG cObjectRefs, /* [size_is][in] */ ObjectID objectRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *RootReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ]); HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )( ICorProfilerCallback11 * This, /* [in] */ ObjectID thrownObjectId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )( ICorProfilerCallback11 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )( ICorProfilerCallback11 * This, /* [in] */ UINT_PTR __unused); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ ObjectID objectId); HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )( ICorProfilerCallback11 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable, /* [in] */ ULONG cSlots); HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )( ICorProfilerCallback11 * This, /* [in] */ ClassID wrappedClassId, /* [in] */ REFGUID implementedIID, /* [in] */ void *pVTable); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE 
*ThreadNameChanged )( ICorProfilerCallback11 * This, /* [in] */ ThreadID threadId, /* [in] */ ULONG cchName, /* [annotation][in] */ _In_reads_opt_(cchName) WCHAR name[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )( ICorProfilerCallback11 * This, /* [in] */ int cGenerations, /* [size_is][in] */ BOOL generationCollected[ ], /* [in] */ COR_PRF_GC_REASON reason); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )( ICorProfilerCallback11 * This, /* [in] */ DWORD finalizerFlags, /* [in] */ ObjectID objectID); HRESULT ( STDMETHODCALLTYPE *RootReferences2 )( ICorProfilerCallback11 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID rootRefIds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ], /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ], /* [size_is][in] */ UINT_PTR rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *HandleCreated )( ICorProfilerCallback11 * This, /* [in] */ GCHandleID handleId, /* [in] */ ObjectID initialObjectId); HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )( ICorProfilerCallback11 * This, /* [in] */ GCHandleID handleId); HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )( ICorProfilerCallback11 * This, /* [in] */ IUnknown *pCorProfilerInfoUnk, /* [in] */ void *pvClientData, /* [in] */ UINT cbClientData); HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )( ICorProfilerCallback11 * This); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ ICorProfilerFunctionControl *pFunctionControl); HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID rejitId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *ReJITError )( ICorProfilerCallback11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus); HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )( ICorProfilerCallback11 * This, /* [in] */ ULONG cMovedObjectIDRanges, /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ], /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )( ICorProfilerCallback11 * This, /* [in] */ ULONG cSurvivingObjectIDRanges, /* [size_is][in] */ ObjectID objectIDRangeStart[ ], /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]); HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )( ICorProfilerCallback11 * This, /* [in] */ ULONG cRootRefs, /* [size_is][in] */ ObjectID keyRefIds[ ], /* [size_is][in] */ ObjectID valueRefIds[ ], /* [size_is][in] */ GCHandleID rootIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )( ICorProfilerCallback11 * This, /* [string][in] */ const WCHAR *wszAssemblyPath, /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider); 
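/*
 * Illustrative usage sketch (hand-written; not MIDL output).
 * LoadAsNotificationOnly, declared just below, is the single method
 * ICorProfilerCallback11 adds over ICorProfilerCallback10: the runtime calls
 * it to ask, through the *pbNotificationOnly out-parameter, whether this
 * profiler wants to be loaded as a notification-only profiler. The
 * implementation name below is hypothetical.
 */
#if 0   /* example only; excluded from compilation */
static HRESULT STDMETHODCALLTYPE MyLoadAsNotificationOnly(
    ICorProfilerCallback11 *This,
    BOOL *pbNotificationOnly)
{
    *pbNotificationOnly = TRUE;   /* assumption: notifications are enough */
    return S_OK;
}
#endif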
HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )( ICorProfilerCallback11 * This, ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationStarted )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fIsSafeToBlock, /* [in] */ LPCBYTE pILHeader, /* [in] */ ULONG cbILHeader); HRESULT ( STDMETHODCALLTYPE *DynamicMethodJITCompilationFinished )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId, /* [in] */ HRESULT hrStatus, /* [in] */ BOOL fIsSafeToBlock); HRESULT ( STDMETHODCALLTYPE *DynamicMethodUnloaded )( ICorProfilerCallback11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *EventPipeEventDelivered )( ICorProfilerCallback11 * This, /* [in] */ EVENTPIPE_PROVIDER provider, /* [in] */ DWORD eventId, /* [in] */ DWORD eventVersion, /* [in] */ ULONG cbMetadataBlob, /* [size_is][in] */ LPCBYTE metadataBlob, /* [in] */ ULONG cbEventData, /* [size_is][in] */ LPCBYTE eventData, /* [in] */ LPCGUID pActivityId, /* [in] */ LPCGUID pRelatedActivityId, /* [in] */ ThreadID eventThread, /* [in] */ ULONG numStackFrames, /* [length_is][in] */ UINT_PTR stackFrames[ ]); HRESULT ( STDMETHODCALLTYPE *EventPipeProviderCreated )( ICorProfilerCallback11 * This, /* [in] */ EVENTPIPE_PROVIDER provider); HRESULT ( STDMETHODCALLTYPE *LoadAsNotificationOnly )( ICorProfilerCallback11 * This, BOOL *pbNotificationOnly); END_INTERFACE } ICorProfilerCallback11Vtbl; interface ICorProfilerCallback11 { CONST_VTBL struct ICorProfilerCallback11Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerCallback11_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerCallback11_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerCallback11_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerCallback11_Initialize(This,pICorProfilerInfoUnk) \ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) ) #define ICorProfilerCallback11_Shutdown(This) \ ( (This)->lpVtbl -> Shutdown(This) ) #define ICorProfilerCallback11_AppDomainCreationStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) ) #define ICorProfilerCallback11_AppDomainCreationFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback11_AppDomainShutdownStarted(This,appDomainId) \ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) ) #define ICorProfilerCallback11_AppDomainShutdownFinished(This,appDomainId,hrStatus) \ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) ) #define ICorProfilerCallback11_AssemblyLoadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) ) #define ICorProfilerCallback11_AssemblyLoadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback11_AssemblyUnloadStarted(This,assemblyId) \ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) ) #define ICorProfilerCallback11_AssemblyUnloadFinished(This,assemblyId,hrStatus) \ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) ) #define ICorProfilerCallback11_ModuleLoadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) ) #define ICorProfilerCallback11_ModuleLoadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) ) #define 
ICorProfilerCallback11_ModuleUnloadStarted(This,moduleId) \ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) ) #define ICorProfilerCallback11_ModuleUnloadFinished(This,moduleId,hrStatus) \ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) ) #define ICorProfilerCallback11_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) ) #define ICorProfilerCallback11_ClassLoadStarted(This,classId) \ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) ) #define ICorProfilerCallback11_ClassLoadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback11_ClassUnloadStarted(This,classId) \ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) ) #define ICorProfilerCallback11_ClassUnloadFinished(This,classId,hrStatus) \ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) ) #define ICorProfilerCallback11_FunctionUnloadStarted(This,functionId) \ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) ) #define ICorProfilerCallback11_JITCompilationStarted(This,functionId,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) ) #define ICorProfilerCallback11_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback11_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) ) #define ICorProfilerCallback11_JITCachedFunctionSearchFinished(This,functionId,result) \ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) ) #define ICorProfilerCallback11_JITFunctionPitched(This,functionId) \ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) ) #define ICorProfilerCallback11_JITInlining(This,callerId,calleeId,pfShouldInline) \ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) ) #define ICorProfilerCallback11_ThreadCreated(This,threadId) \ ( (This)->lpVtbl -> ThreadCreated(This,threadId) ) #define ICorProfilerCallback11_ThreadDestroyed(This,threadId) \ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) ) #define ICorProfilerCallback11_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) ) #define ICorProfilerCallback11_RemotingClientInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) ) #define ICorProfilerCallback11_RemotingClientSendingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_RemotingClientReceivingReply(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_RemotingClientInvocationFinished(This) \ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) ) #define ICorProfilerCallback11_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_RemotingServerInvocationStarted(This) \ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) ) #define ICorProfilerCallback11_RemotingServerInvocationReturned(This) \ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) ) #define ICorProfilerCallback11_RemotingServerSendingReply(This,pCookie,fIsAsync) \ ( 
(This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) ) #define ICorProfilerCallback11_UnmanagedToManagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) ) #define ICorProfilerCallback11_ManagedToUnmanagedTransition(This,functionId,reason) \ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) ) #define ICorProfilerCallback11_RuntimeSuspendStarted(This,suspendReason) \ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) ) #define ICorProfilerCallback11_RuntimeSuspendFinished(This) \ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) ) #define ICorProfilerCallback11_RuntimeSuspendAborted(This) \ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) ) #define ICorProfilerCallback11_RuntimeResumeStarted(This) \ ( (This)->lpVtbl -> RuntimeResumeStarted(This) ) #define ICorProfilerCallback11_RuntimeResumeFinished(This) \ ( (This)->lpVtbl -> RuntimeResumeFinished(This) ) #define ICorProfilerCallback11_RuntimeThreadSuspended(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) ) #define ICorProfilerCallback11_RuntimeThreadResumed(This,threadId) \ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) ) #define ICorProfilerCallback11_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback11_ObjectAllocated(This,objectId,classId) \ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) ) #define ICorProfilerCallback11_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) ) #define ICorProfilerCallback11_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) ) #define ICorProfilerCallback11_RootReferences(This,cRootRefs,rootRefIds) \ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) ) #define ICorProfilerCallback11_ExceptionThrown(This,thrownObjectId) \ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) ) #define ICorProfilerCallback11_ExceptionSearchFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionSearchFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) ) #define ICorProfilerCallback11_ExceptionSearchFilterEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionSearchFilterLeave(This) \ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) ) #define ICorProfilerCallback11_ExceptionSearchCatcherFound(This,functionId) \ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) ) #define ICorProfilerCallback11_ExceptionOSHandlerEnter(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) ) #define ICorProfilerCallback11_ExceptionOSHandlerLeave(This,__unused) \ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) ) #define ICorProfilerCallback11_ExceptionUnwindFunctionEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionUnwindFunctionLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) ) #define 
ICorProfilerCallback11_ExceptionUnwindFinallyEnter(This,functionId) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) ) #define ICorProfilerCallback11_ExceptionUnwindFinallyLeave(This) \ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) ) #define ICorProfilerCallback11_ExceptionCatcherEnter(This,functionId,objectId) \ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) ) #define ICorProfilerCallback11_ExceptionCatcherLeave(This) \ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) ) #define ICorProfilerCallback11_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) ) #define ICorProfilerCallback11_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) ) #define ICorProfilerCallback11_ExceptionCLRCatcherFound(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) ) #define ICorProfilerCallback11_ExceptionCLRCatcherExecute(This) \ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) ) #define ICorProfilerCallback11_ThreadNameChanged(This,threadId,cchName,name) \ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) ) #define ICorProfilerCallback11_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) ) #define ICorProfilerCallback11_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback11_GarbageCollectionFinished(This) \ ( (This)->lpVtbl -> GarbageCollectionFinished(This) ) #define ICorProfilerCallback11_FinalizeableObjectQueued(This,finalizerFlags,objectID) \ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) ) #define ICorProfilerCallback11_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) ) #define ICorProfilerCallback11_HandleCreated(This,handleId,initialObjectId) \ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) ) #define ICorProfilerCallback11_HandleDestroyed(This,handleId) \ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) ) #define ICorProfilerCallback11_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) ) #define ICorProfilerCallback11_ProfilerAttachComplete(This) \ ( (This)->lpVtbl -> ProfilerAttachComplete(This) ) #define ICorProfilerCallback11_ProfilerDetachSucceeded(This) \ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) ) #define ICorProfilerCallback11_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) ) #define ICorProfilerCallback11_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) ) #define ICorProfilerCallback11_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) ) #define 
ICorProfilerCallback11_ReJITError(This,moduleId,methodId,functionId,hrStatus) \ ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) ) #define ICorProfilerCallback11_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback11_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \ ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) ) #define ICorProfilerCallback11_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \ ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) ) #define ICorProfilerCallback11_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \ ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) ) #define ICorProfilerCallback11_ModuleInMemorySymbolsUpdated(This,moduleId) \ ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) ) #define ICorProfilerCallback11_DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) \ ( (This)->lpVtbl -> DynamicMethodJITCompilationStarted(This,functionId,fIsSafeToBlock,pILHeader,cbILHeader) ) #define ICorProfilerCallback11_DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \ ( (This)->lpVtbl -> DynamicMethodJITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) ) #define ICorProfilerCallback11_DynamicMethodUnloaded(This,functionId) \ ( (This)->lpVtbl -> DynamicMethodUnloaded(This,functionId) ) #define ICorProfilerCallback11_EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) \ ( (This)->lpVtbl -> EventPipeEventDelivered(This,provider,eventId,eventVersion,cbMetadataBlob,metadataBlob,cbEventData,eventData,pActivityId,pRelatedActivityId,eventThread,numStackFrames,stackFrames) ) #define ICorProfilerCallback11_EventPipeProviderCreated(This,provider) \ ( (This)->lpVtbl -> EventPipeProviderCreated(This,provider) ) #define ICorProfilerCallback11_LoadAsNotificationOnly(This,pbNotificationOnly) \ ( (This)->lpVtbl -> LoadAsNotificationOnly(This,pbNotificationOnly) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerCallback11_INTERFACE_DEFINED__ */ /* interface __MIDL_itf_corprof_0000_0011 */ /* [local] */ typedef /* [public] */ enum __MIDL___MIDL_itf_corprof_0000_0011_0001 { COR_PRF_CODEGEN_DISABLE_INLINING = 0x1, COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS = 0x2 } COR_PRF_CODEGEN_FLAGS; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0011_v0_0_c_ifspec; extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0011_v0_0_s_ifspec; #ifndef __ICorProfilerInfo_INTERFACE_DEFINED__ #define __ICorProfilerInfo_INTERFACE_DEFINED__ /* interface ICorProfilerInfo */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("28B5557D-3F3F-48b4-90B2-5F9EEA2F6C48") ICorProfilerInfo : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE GetClassFromObject( /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId) = 0; virtual HRESULT STDMETHODCALLTYPE GetClassFromToken( /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef 
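/*
 * Illustrative usage sketch (hand-written; not MIDL output).
 * COR_PRF_CODEGEN_FLAGS, defined just above, is consumed during ReJIT: from a
 * GetReJITParameters callback the profiler may pass these bits to
 * ICorProfilerFunctionControl::SetCodegenFlags to suppress inlining, or all
 * optimizations, for the method being recompiled. The callback name below is
 * hypothetical.
 */
#if 0   /* example only; excluded from compilation */
static HRESULT STDMETHODCALLTYPE MyGetReJITParameters(
    ICorProfilerCallback11 *This,
    ModuleID moduleId,
    mdMethodDef methodId,
    ICorProfilerFunctionControl *pFunctionControl)
{
    /* Ask the JIT not to inline other methods into the recompiled body. */
    return pFunctionControl->lpVtbl->SetCodegenFlags(
        pFunctionControl, COR_PRF_CODEGEN_DISABLE_INLINING);
}
#endif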
typeDef, /* [out] */ ClassID *pClassId) = 0; virtual HRESULT STDMETHODCALLTYPE GetCodeInfo( /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize) = 0; virtual HRESULT STDMETHODCALLTYPE GetEventMask( /* [out] */ DWORD *pdwEvents) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionFromIP( /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionFromToken( /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId) = 0; virtual HRESULT STDMETHODCALLTYPE GetHandleFromThread( /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread) = 0; virtual HRESULT STDMETHODCALLTYPE GetObjectSize( /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize) = 0; virtual HRESULT STDMETHODCALLTYPE IsArrayClass( /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank) = 0; virtual HRESULT STDMETHODCALLTYPE GetThreadInfo( /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId) = 0; virtual HRESULT STDMETHODCALLTYPE GetCurrentThreadID( /* [out] */ ThreadID *pThreadId) = 0; virtual HRESULT STDMETHODCALLTYPE GetClassIDInfo( /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionInfo( /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken) = 0; virtual HRESULT STDMETHODCALLTYPE SetEventMask( /* [in] */ DWORD dwEvents) = 0; virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks( /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall) = 0; virtual HRESULT STDMETHODCALLTYPE SetFunctionIDMapper( /* [in] */ FunctionIDMapper *pFunc) = 0; virtual HRESULT STDMETHODCALLTYPE GetTokenAndMetaDataFromFunction( /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken) = 0; virtual HRESULT STDMETHODCALLTYPE GetModuleInfo( /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId) = 0; virtual HRESULT STDMETHODCALLTYPE GetModuleMetaData( /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut) = 0; virtual HRESULT STDMETHODCALLTYPE GetILFunctionBody( /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize) = 0; virtual HRESULT STDMETHODCALLTYPE GetILFunctionBodyAllocator( /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc) = 0; virtual HRESULT STDMETHODCALLTYPE SetILFunctionBody( /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader) = 0; virtual HRESULT STDMETHODCALLTYPE GetAppDomainInfo( /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId) = 0; virtual HRESULT STDMETHODCALLTYPE GetAssemblyInfo( /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID 
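/*
 * Illustrative usage sketch (hand-written; not MIDL output). GetModuleInfo,
 * GetAppDomainInfo, and GetAssemblyInfo all follow the counted-string
 * convention used throughout this interface: the caller passes a buffer size
 * in cchName and receives the required length through *pcchName, so probing
 * with cchName = 0 first and retrying with an adequately sized buffer is the
 * usual pattern. The helper name below is hypothetical.
 */
#if 0   /* example only; excluded from compilation */
static HRESULT GetModuleName(
    ICorProfilerInfo *pInfo,
    ModuleID moduleId,
    WCHAR *buffer,
    ULONG cchBuffer)
{
    LPCBYTE baseAddress = NULL;
    ULONG cchNeeded = 0;
    AssemblyID assemblyId = 0;
    /* On a too-small buffer, *pcchName still reports the full length. */
    return ICorProfilerInfo_GetModuleInfo(pInfo, moduleId, &baseAddress,
                                          cchBuffer, &cchNeeded, buffer,
                                          &assemblyId);
}
#endif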
*pModuleId) = 0; virtual HRESULT STDMETHODCALLTYPE SetFunctionReJIT( /* [in] */ FunctionID functionId) = 0; virtual HRESULT STDMETHODCALLTYPE ForceGC( void) = 0; virtual HRESULT STDMETHODCALLTYPE SetILInstrumentedCodeMap( /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetInprocInspectionInterface( /* [out] */ IUnknown **ppicd) = 0; virtual HRESULT STDMETHODCALLTYPE GetInprocInspectionIThisThread( /* [out] */ IUnknown **ppicd) = 0; virtual HRESULT STDMETHODCALLTYPE GetThreadContext( /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId) = 0; virtual HRESULT STDMETHODCALLTYPE BeginInprocDebugging( /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext) = 0; virtual HRESULT STDMETHODCALLTYPE EndInprocDebugging( /* [in] */ DWORD dwProfilerContext) = 0; virtual HRESULT STDMETHODCALLTYPE GetILToNativeMapping( /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfoVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo * This, /* [in] 
    HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo * This, /* [in] */ DWORD dwEvents);
    HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall);
    HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo * This, /* [in] */ FunctionIDMapper *pFunc);
    HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken);
    HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId);
    HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut);
    HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize);
    HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc);
    HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader);
    HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId);
    HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId);
    HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo * This);
    HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo * This, /* [out] */ IUnknown **ppicd);
    HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo * This, /* [out] */ IUnknown **ppicd);
    HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId);
    HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext);
    HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo * This, /* [in] */ DWORD dwProfilerContext);
    HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
    END_INTERFACE
} ICorProfilerInfoVtbl;

interface ICorProfilerInfo
{
    CONST_VTBL struct ICorProfilerInfoVtbl *lpVtbl;
};

#ifdef COBJMACROS

#define ICorProfilerInfo_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerInfo_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerInfo_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerInfo_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
#define ICorProfilerInfo_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
#define ICorProfilerInfo_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
#define ICorProfilerInfo_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
#define ICorProfilerInfo_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
#define ICorProfilerInfo_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
#define ICorProfilerInfo_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
#define ICorProfilerInfo_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
#define ICorProfilerInfo_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
#define ICorProfilerInfo_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
#define ICorProfilerInfo_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
#define ICorProfilerInfo_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
#define ICorProfilerInfo_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
#define ICorProfilerInfo_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
#define ICorProfilerInfo_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
#define ICorProfilerInfo_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
#define ICorProfilerInfo_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
    ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )
#define ICorProfilerInfo_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
    ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) )
#define ICorProfilerInfo_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \
    ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) )
#define ICorProfilerInfo_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \
    ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) )
#define ICorProfilerInfo_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) )
#define ICorProfilerInfo_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \
    ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) )
#define ICorProfilerInfo_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \
    ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) )
#define ICorProfilerInfo_SetFunctionReJIT(This,functionId) \
    ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) )
#define ICorProfilerInfo_ForceGC(This) \
    ( (This)->lpVtbl -> ForceGC(This) )
#define ICorProfilerInfo_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) )
#define ICorProfilerInfo_GetInprocInspectionInterface(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) )
#define ICorProfilerInfo_GetInprocInspectionIThisThread(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) )
#define ICorProfilerInfo_GetThreadContext(This,threadId,pContextId) \
    ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) )
#define ICorProfilerInfo_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \
    ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) )
#define ICorProfilerInfo_EndInprocDebugging(This,dwProfilerContext) \
    ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) )
#define ICorProfilerInfo_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __ICorProfilerInfo_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo2_INTERFACE_DEFINED__
#define __ICorProfilerInfo2_INTERFACE_DEFINED__

/* interface ICorProfilerInfo2 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo2;

#if defined(__cplusplus) && !defined(CINTERFACE)

MIDL_INTERFACE("CC0935CD-A518-487d-B0BB-A93214E65478")
ICorProfilerInfo2 : public ICorProfilerInfo
{
public:
    virtual HRESULT STDMETHODCALLTYPE DoStackSnapshot( /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize) = 0;
    virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks2( /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetFunctionInfo2( /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetStringLayout( /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetClassLayout( /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetClassIDInfo2( /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetCodeInfo2( /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetClassFromTokenAndTypeArgs( /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetFunctionFromTokenAndTypeArgs( /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID) = 0;
    virtual HRESULT STDMETHODCALLTYPE EnumModuleFrozenObjects( /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetArrayObjectInfo( /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetBoxClassLayout( /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetThreadAppDomain( /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetRVAStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetAppDomainStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetThreadStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetContextStaticAddress( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetStaticFieldInfo( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetGenerationBounds( /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetObjectGeneration( /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetNotifiedExceptionClauseInfo( /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo) = 0;
};

#else   /* C style interface */

typedef struct ICorProfilerInfo2Vtbl
{
    BEGIN_INTERFACE
    HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo2 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
    ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo2 * This);
    ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo2 * This);
    HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId);
    HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId);
    HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize);
    HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo2 * This, /* [out] */ DWORD *pdwEvents);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo2 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId);
    HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread);
    HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize);
    HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank);
    HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId);
    HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo2 * This, /* [out] */ ThreadID *pThreadId);
    HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken);
    HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo2 * This, /* [in] */ DWORD dwEvents);
    HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo2 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall);
    HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo2 * This, /* [in] */ FunctionIDMapper *pFunc);
    HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken);
    HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId);
    HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut);
    HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize);
    HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc);
    HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader);
    HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo2 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId);
    HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo2 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId);
    HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo2 * This);
    HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo2 * This, /* [out] */ IUnknown **ppicd);
    HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo2 * This, /* [out] */ IUnknown **ppicd);
    HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId);
    HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo2 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext);
    HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo2 * This, /* [in] */ DWORD dwProfilerContext);
    HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
    HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo2 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize);
    HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo2 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo2 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo2 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset);
    HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo2 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize);
    HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo2 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID);
    HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo2 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum);
    HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData);
    HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset);
    HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo2 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId);
    HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo2 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo);
    HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo2 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo2 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range);
    HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo2 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo);
    END_INTERFACE
} ICorProfilerInfo2Vtbl;

interface ICorProfilerInfo2
{
    CONST_VTBL struct ICorProfilerInfo2Vtbl *lpVtbl;
};

#ifdef COBJMACROS

#define ICorProfilerInfo2_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerInfo2_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerInfo2_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerInfo2_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
#define ICorProfilerInfo2_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
#define ICorProfilerInfo2_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
#define ICorProfilerInfo2_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
#define ICorProfilerInfo2_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
#define ICorProfilerInfo2_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
#define ICorProfilerInfo2_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
#define ICorProfilerInfo2_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
#define ICorProfilerInfo2_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
#define ICorProfilerInfo2_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
#define ICorProfilerInfo2_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
#define ICorProfilerInfo2_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
#define ICorProfilerInfo2_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
#define ICorProfilerInfo2_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
#define ICorProfilerInfo2_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo2_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
#define ICorProfilerInfo2_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
#define ICorProfilerInfo2_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
    ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )
#define ICorProfilerInfo2_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
    ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) )
#define ICorProfilerInfo2_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \
    ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) )
#define ICorProfilerInfo2_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \
    ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) )
#define ICorProfilerInfo2_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) )
#define ICorProfilerInfo2_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \
    ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) )
#define ICorProfilerInfo2_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \
    ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) )
#define ICorProfilerInfo2_SetFunctionReJIT(This,functionId) \
    ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) )
#define ICorProfilerInfo2_ForceGC(This) \
    ( (This)->lpVtbl -> ForceGC(This) )
#define ICorProfilerInfo2_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) )
#define ICorProfilerInfo2_GetInprocInspectionInterface(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) )
#define ICorProfilerInfo2_GetInprocInspectionIThisThread(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) )
#define ICorProfilerInfo2_GetThreadContext(This,threadId,pContextId) \
    ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) )
#define ICorProfilerInfo2_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \
    ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) )
#define ICorProfilerInfo2_EndInprocDebugging(This,dwProfilerContext) \
    ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) )
#define ICorProfilerInfo2_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) )
#define ICorProfilerInfo2_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \
    ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) )
#define ICorProfilerInfo2_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo2_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) )
#define ICorProfilerInfo2_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) )
#define ICorProfilerInfo2_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \
    ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) )
#define ICorProfilerInfo2_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) )
#define ICorProfilerInfo2_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) )
#define ICorProfilerInfo2_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \
    ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) )
#define ICorProfilerInfo2_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \
    ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) )
#define ICorProfilerInfo2_EnumModuleFrozenObjects(This,moduleID,ppEnum) \
    ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) )
#define ICorProfilerInfo2_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \
    ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) )
#define ICorProfilerInfo2_GetBoxClassLayout(This,classId,pBufferOffset) \
    ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) )
#define ICorProfilerInfo2_GetThreadAppDomain(This,threadId,pAppDomainId) \
    ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) )
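/* Usage sketch (editorial addition, not emitted by MIDL): calling
 * ICorProfilerInfo2 from C through the COBJMACROS wrappers above. The names
 * g_pInfo2 and objectId are hypothetical; a real profiler obtains its
 * ICorProfilerInfo2 pointer by calling QueryInterface on the IUnknown that
 * the runtime passes to ICorProfilerCallback::Initialize.
 *
 *     ICorProfilerInfo2 *g_pInfo2;   // assumed cached during Initialize
 *     ULONG cbObject = 0;
 *     HRESULT hr = ICorProfilerInfo2_GetObjectSize(g_pInfo2, objectId, &cbObject);
 *     if (SUCCEEDED(hr))
 *     {
 *         // cbObject now holds the size of the GC object, in bytes.
 *     }
 *     ICorProfilerInfo2_Release(g_pInfo2);   // release on profiler shutdown
 */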
#define ICorProfilerInfo2_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \
    ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) )
#define ICorProfilerInfo2_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \
    ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) )
#define ICorProfilerInfo2_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \
    ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) )
#define ICorProfilerInfo2_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \
    ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) )
#define ICorProfilerInfo2_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \
    ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) )
#define ICorProfilerInfo2_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \
    ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) )
#define ICorProfilerInfo2_GetObjectGeneration(This,objectId,range) \
    ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) )
#define ICorProfilerInfo2_GetNotifiedExceptionClauseInfo(This,pinfo) \
    ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __ICorProfilerInfo2_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo3_INTERFACE_DEFINED__
#define __ICorProfilerInfo3_INTERFACE_DEFINED__

/* interface ICorProfilerInfo3 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo3;

#if defined(__cplusplus) && !defined(CINTERFACE)

MIDL_INTERFACE("B555ED4F-452A-4E54-8B39-B5360BAD32A0")
ICorProfilerInfo3 : public ICorProfilerInfo2
{
public:
    virtual HRESULT STDMETHODCALLTYPE EnumJITedFunctions( /* [out] */ ICorProfilerFunctionEnum **ppEnum) = 0;
    virtual HRESULT STDMETHODCALLTYPE RequestProfilerDetach( /* [in] */ DWORD dwExpectedCompletionMilliseconds) = 0;
    virtual HRESULT STDMETHODCALLTYPE SetFunctionIDMapper2( /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetStringLayout2( /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset) = 0;
    virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks3( /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3) = 0;
    virtual HRESULT STDMETHODCALLTYPE SetEnterLeaveFunctionHooks3WithInfo( /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetFunctionEnter3Info( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetFunctionLeave3Info( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetFunctionTailcall3Info( /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo) = 0;
    virtual HRESULT STDMETHODCALLTYPE EnumModules( /* [out] */ ICorProfilerModuleEnum **ppEnum) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetRuntimeInformation( /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetThreadStaticAddress2( /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetAppDomainsContainingModule( /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]) = 0;
    virtual HRESULT STDMETHODCALLTYPE GetModuleInfo2( /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags) = 0;
};

#else   /* C style interface */

typedef struct ICorProfilerInfo3Vtbl
{
    BEGIN_INTERFACE
    HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo3 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
    ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo3 * This);
    ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo3 * This);
    HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId);
    HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId);
    HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize);
    HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo3 * This, /* [out] */ DWORD *pdwEvents);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo3 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId);
    HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread);
    HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize);
    HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank);
    HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId);
    HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo3 * This, /* [out] */ ThreadID *pThreadId);
    HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken);
    HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo3 * This, /* [in] */ DWORD dwEvents);
    HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall);
    HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo3 * This, /* [in] */ FunctionIDMapper *pFunc);
    HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken);
    HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId);
    HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut);
    HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize);
    HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc);
    HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader);
    HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo3 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId);
    HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo3 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId);
    HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId);
    HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo3 * This);
    HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo3 * This, /* [out] */ IUnknown **ppicd);
    HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo3 * This, /* [out] */ IUnknown **ppicd);
    HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId);
    HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo3 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext);
    HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo3 * This, /* [in] */ DWORD dwProfilerContext);
    HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
    HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo3 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize);
    HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo3 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset);
    HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo3 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize);
    HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID);
    HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum);
    HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData);
    HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset);
    HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo3 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId);
    HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo);
    HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo3 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo3 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range);
    HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo3 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo);
    HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo3 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum);
    HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo3 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds);
    HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo3 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData);
    HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo3 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset);
    HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3);
    HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo3 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange);
    HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo3 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo);
    HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo3 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum);
    HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo3 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo3 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress);
    HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]);
    HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo3 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags);
    END_INTERFACE
} ICorProfilerInfo3Vtbl;

interface ICorProfilerInfo3
{
    CONST_VTBL struct ICorProfilerInfo3Vtbl *lpVtbl;
};

#ifdef COBJMACROS

#define ICorProfilerInfo3_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define ICorProfilerInfo3_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )
#define ICorProfilerInfo3_Release(This) \
    ( (This)->lpVtbl -> Release(This) )
#define ICorProfilerInfo3_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
#define ICorProfilerInfo3_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
#define ICorProfilerInfo3_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
#define ICorProfilerInfo3_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
#define ICorProfilerInfo3_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
#define ICorProfilerInfo3_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
#define ICorProfilerInfo3_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
#define ICorProfilerInfo3_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
#define ICorProfilerInfo3_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
#define ICorProfilerInfo3_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
#define ICorProfilerInfo3_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
#define ICorProfilerInfo3_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
#define ICorProfilerInfo3_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
#define ICorProfilerInfo3_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
#define ICorProfilerInfo3_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo3_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
#define ICorProfilerInfo3_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
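/* Usage sketch (editorial addition, not emitted by MIDL): querying runtime
 * version data via the ICorProfilerInfo3 macro wrappers above. All local
 * names are hypothetical; pInfo3 is assumed to have been obtained via
 * QueryInterface for IID_ICorProfilerInfo3.
 *
 *     USHORT clrId, major, minor, build, qfe;
 *     COR_PRF_RUNTIME_TYPE runtimeType;
 *     WCHAR version[256];
 *     ULONG cchWritten = 0;
 *     HRESULT hr = ICorProfilerInfo3_GetRuntimeInformation(pInfo3, &clrId,
 *         &runtimeType, &major, &minor, &build, &qfe, 256, &cchWritten, version);
 *     // On success, runtimeType distinguishes COR_PRF_DESKTOP_CLR from
 *     // COR_PRF_CORE_CLR, and major/minor/build/qfe carry the CLR version.
 */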
#define ICorProfilerInfo3_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
    ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )
#define ICorProfilerInfo3_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
    ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) )
#define ICorProfilerInfo3_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \
    ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) )
#define ICorProfilerInfo3_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \
    ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) )
#define ICorProfilerInfo3_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) )
#define ICorProfilerInfo3_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \
    ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) )
#define ICorProfilerInfo3_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \
    ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) )
#define ICorProfilerInfo3_SetFunctionReJIT(This,functionId) \
    ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) )
#define ICorProfilerInfo3_ForceGC(This) \
    ( (This)->lpVtbl -> ForceGC(This) )
#define ICorProfilerInfo3_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) )
#define ICorProfilerInfo3_GetInprocInspectionInterface(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) )
#define ICorProfilerInfo3_GetInprocInspectionIThisThread(This,ppicd) \
    ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) )
#define ICorProfilerInfo3_GetThreadContext(This,threadId,pContextId) \
    ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) )
#define ICorProfilerInfo3_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \
    ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) )
#define ICorProfilerInfo3_EndInprocDebugging(This,dwProfilerContext) \
    ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) )
#define ICorProfilerInfo3_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) )
#define ICorProfilerInfo3_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \
    ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) )
#define ICorProfilerInfo3_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
#define ICorProfilerInfo3_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) )
#define ICorProfilerInfo3_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) )
#define ICorProfilerInfo3_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \
    ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) )
#define ICorProfilerInfo3_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \
    ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) )
#define ICorProfilerInfo3_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) )
#define ICorProfilerInfo3_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \
    ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) )
#define ICorProfilerInfo3_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \
    ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) )
#define ICorProfilerInfo3_EnumModuleFrozenObjects(This,moduleID,ppEnum) \
    ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) )
#define ICorProfilerInfo3_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \
    ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) )
#define ICorProfilerInfo3_GetBoxClassLayout(This,classId,pBufferOffset) \
    ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) )
#define ICorProfilerInfo3_GetThreadAppDomain(This,threadId,pAppDomainId) \
    ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) )
#define ICorProfilerInfo3_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \
    ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) )
#define ICorProfilerInfo3_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \
    ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) )
#define ICorProfilerInfo3_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \
    ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) )
#define ICorProfilerInfo3_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \
    ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) )
#define ICorProfilerInfo3_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \
    ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) )
#define ICorProfilerInfo3_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \
    ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) )
#define ICorProfilerInfo3_GetObjectGeneration(This,objectId,range) \
    ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) )
#define ICorProfilerInfo3_GetNotifiedExceptionClauseInfo(This,pinfo) \
    ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) )
#define ICorProfilerInfo3_EnumJITedFunctions(This,ppEnum) \
    ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) )
#define ICorProfilerInfo3_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \
    ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) )
#define ICorProfilerInfo3_SetFunctionIDMapper2(This,pFunc,clientData) \
    ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) )
#define ICorProfilerInfo3_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \
    ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) )
ICorProfilerInfo3_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo3_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo3_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo3_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo3_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo3_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo3_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo3_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo3_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo3_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo3_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerObjectEnum_INTERFACE_DEFINED__ #define __ICorProfilerObjectEnum_INTERFACE_DEFINED__ /* interface ICorProfilerObjectEnum */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerObjectEnum; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("2C6269BD-2D13-4321-AE12-6686365FD6AF") ICorProfilerObjectEnum : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0; virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0; virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerObjectEnum **ppEnum) = 0; virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0; virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ObjectID objects[ ], /* [out] */ ULONG *pceltFetched) = 0; }; #else /* C style interface */ typedef struct ICorProfilerObjectEnumVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerObjectEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerObjectEnum * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerObjectEnum * This); HRESULT ( STDMETHODCALLTYPE *Skip 
)(
            ICorProfilerObjectEnum * This,
            /* [in] */ ULONG celt);

        HRESULT ( STDMETHODCALLTYPE *Reset )(
            ICorProfilerObjectEnum * This);

        HRESULT ( STDMETHODCALLTYPE *Clone )(
            ICorProfilerObjectEnum * This,
            /* [out] */ ICorProfilerObjectEnum **ppEnum);

        HRESULT ( STDMETHODCALLTYPE *GetCount )(
            ICorProfilerObjectEnum * This,
            /* [out] */ ULONG *pcelt);

        HRESULT ( STDMETHODCALLTYPE *Next )(
            ICorProfilerObjectEnum * This,
            /* [in] */ ULONG celt,
            /* [length_is][size_is][out] */ ObjectID objects[ ],
            /* [out] */ ULONG *pceltFetched);

        END_INTERFACE
    } ICorProfilerObjectEnumVtbl;

    interface ICorProfilerObjectEnum
    {
        CONST_VTBL struct ICorProfilerObjectEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerObjectEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerObjectEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerObjectEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerObjectEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )

#define ICorProfilerObjectEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )

#define ICorProfilerObjectEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )

#define ICorProfilerObjectEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )

#define ICorProfilerObjectEnum_Next(This,celt,objects,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,objects,pceltFetched) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __ICorProfilerObjectEnum_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerFunctionEnum_INTERFACE_DEFINED__
#define __ICorProfilerFunctionEnum_INTERFACE_DEFINED__

/* interface ICorProfilerFunctionEnum */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerFunctionEnum;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("FF71301A-B994-429D-A10B-B345A65280EF")
    ICorProfilerFunctionEnum : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE Skip(
            /* [in] */ ULONG celt) = 0;

        virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0;

        virtual HRESULT STDMETHODCALLTYPE Clone(
            /* [out] */ ICorProfilerFunctionEnum **ppEnum) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetCount(
            /* [out] */ ULONG *pcelt) = 0;

        virtual HRESULT STDMETHODCALLTYPE Next(
            /* [in] */ ULONG celt,
            /* [length_is][size_is][out] */ COR_PRF_FUNCTION ids[ ],
            /* [out] */ ULONG *pceltFetched) = 0;

    };

#else   /* C style interface */

    typedef struct ICorProfilerFunctionEnumVtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerFunctionEnum * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )(
            ICorProfilerFunctionEnum * This);

        ULONG ( STDMETHODCALLTYPE *Release )(
            ICorProfilerFunctionEnum * This);

        HRESULT ( STDMETHODCALLTYPE *Skip )(
            ICorProfilerFunctionEnum * This,
            /* [in] */ ULONG celt);

        HRESULT ( STDMETHODCALLTYPE *Reset )(
            ICorProfilerFunctionEnum * This);

        HRESULT ( STDMETHODCALLTYPE *Clone )(
            ICorProfilerFunctionEnum * This,
            /* [out] */ ICorProfilerFunctionEnum **ppEnum);

        HRESULT ( STDMETHODCALLTYPE *GetCount )(
            ICorProfilerFunctionEnum * This,
            /* [out] */ ULONG *pcelt);

        HRESULT ( STDMETHODCALLTYPE *Next )(
            ICorProfilerFunctionEnum * This,
            /* [in] */ ULONG celt,
            /* [length_is][size_is][out] */ COR_PRF_FUNCTION ids[ ],
            /* [out] */ ULONG *pceltFetched);

        END_INTERFACE
    } ICorProfilerFunctionEnumVtbl;

    interface ICorProfilerFunctionEnum
    {
        CONST_VTBL struct ICorProfilerFunctionEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerFunctionEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerFunctionEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerFunctionEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerFunctionEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )

#define ICorProfilerFunctionEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )

#define ICorProfilerFunctionEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )

#define ICorProfilerFunctionEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )

#define ICorProfilerFunctionEnum_Next(This,celt,ids,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,ids,pceltFetched) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __ICorProfilerFunctionEnum_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerModuleEnum_INTERFACE_DEFINED__
#define __ICorProfilerModuleEnum_INTERFACE_DEFINED__

/* interface ICorProfilerModuleEnum */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerModuleEnum;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("b0266d75-2081-4493-af7f-028ba34db891")
    ICorProfilerModuleEnum : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE Skip(
            /* [in] */ ULONG celt) = 0;

        virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0;

        virtual HRESULT STDMETHODCALLTYPE Clone(
            /* [out] */ ICorProfilerModuleEnum **ppEnum) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetCount(
            /* [out] */ ULONG *pcelt) = 0;

        virtual HRESULT STDMETHODCALLTYPE Next(
            /* [in] */ ULONG celt,
            /* [length_is][size_is][out] */ ModuleID ids[ ],
            /* [out] */ ULONG *pceltFetched) = 0;

    };

#else   /* C style interface */

    typedef struct ICorProfilerModuleEnumVtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerModuleEnum * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )(
            ICorProfilerModuleEnum * This);

        ULONG ( STDMETHODCALLTYPE *Release )(
            ICorProfilerModuleEnum * This);

        HRESULT ( STDMETHODCALLTYPE *Skip )(
            ICorProfilerModuleEnum * This,
            /* [in] */ ULONG celt);

        HRESULT ( STDMETHODCALLTYPE *Reset )(
            ICorProfilerModuleEnum * This);

        HRESULT ( STDMETHODCALLTYPE *Clone )(
            ICorProfilerModuleEnum * This,
            /* [out] */ ICorProfilerModuleEnum **ppEnum);

        HRESULT ( STDMETHODCALLTYPE *GetCount )(
            ICorProfilerModuleEnum * This,
            /* [out] */ ULONG *pcelt);

        HRESULT ( STDMETHODCALLTYPE *Next )(
            ICorProfilerModuleEnum * This,
            /* [in] */ ULONG celt,
            /* [length_is][size_is][out] */ ModuleID ids[ ],
            /* [out] */ ULONG *pceltFetched);

        END_INTERFACE
    } ICorProfilerModuleEnumVtbl;

    interface ICorProfilerModuleEnum
    {
        CONST_VTBL struct ICorProfilerModuleEnumVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerModuleEnum_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerModuleEnum_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerModuleEnum_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerModuleEnum_Skip(This,celt) \
    ( (This)->lpVtbl -> Skip(This,celt) )

#define ICorProfilerModuleEnum_Reset(This) \
    ( (This)->lpVtbl -> Reset(This) )

#define ICorProfilerModuleEnum_Clone(This,ppEnum) \
    ( (This)->lpVtbl -> Clone(This,ppEnum) )

#define ICorProfilerModuleEnum_GetCount(This,pcelt) \
    ( (This)->lpVtbl -> GetCount(This,pcelt) )

#define ICorProfilerModuleEnum_Next(This,celt,ids,pceltFetched) \
    ( (This)->lpVtbl -> Next(This,celt,ids,pceltFetched) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __ICorProfilerModuleEnum_INTERFACE_DEFINED__ */

#ifndef __IMethodMalloc_INTERFACE_DEFINED__
#define __IMethodMalloc_INTERFACE_DEFINED__

/* interface IMethodMalloc */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_IMethodMalloc;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("A0EFB28B-6EE2-4d7b-B983-A75EF7BEEDB8")
    IMethodMalloc : public IUnknown
    {
    public:
        virtual PVOID STDMETHODCALLTYPE Alloc(
            /* [in] */ ULONG cb) = 0;

    };

#else   /* C style interface */

    typedef struct IMethodMallocVtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            IMethodMalloc * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )(
            IMethodMalloc * This);

        ULONG ( STDMETHODCALLTYPE *Release )(
            IMethodMalloc * This);

        PVOID ( STDMETHODCALLTYPE *Alloc )(
            IMethodMalloc * This,
            /* [in] */ ULONG cb);

        END_INTERFACE
    } IMethodMallocVtbl;

    interface IMethodMalloc
    {
        CONST_VTBL struct IMethodMallocVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define IMethodMalloc_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define IMethodMalloc_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define IMethodMalloc_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define IMethodMalloc_Alloc(This,cb) \
    ( (This)->lpVtbl -> Alloc(This,cb) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __IMethodMalloc_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerFunctionControl_INTERFACE_DEFINED__
#define __ICorProfilerFunctionControl_INTERFACE_DEFINED__

/* interface ICorProfilerFunctionControl */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerFunctionControl;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("F0963021-E1EA-4732-8581-E01B0BD3C0C6")
    ICorProfilerFunctionControl : public IUnknown
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE SetCodegenFlags(
            /* [in] */ DWORD flags) = 0;

        virtual HRESULT STDMETHODCALLTYPE SetILFunctionBody(
            /* [in] */ ULONG cbNewILMethodHeader,
            /* [size_is][in] */ LPCBYTE pbNewILMethodHeader) = 0;

        virtual HRESULT STDMETHODCALLTYPE SetILInstrumentedCodeMap(
            /* [in] */ ULONG cILMapEntries,
            /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]) = 0;

    };

#else   /* C style interface */

    typedef struct ICorProfilerFunctionControlVtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerFunctionControl * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )(
            ICorProfilerFunctionControl * This);

        ULONG ( STDMETHODCALLTYPE *Release )(
            ICorProfilerFunctionControl * This);

        HRESULT ( STDMETHODCALLTYPE *SetCodegenFlags )(
            ICorProfilerFunctionControl * This,
            /* [in] */ DWORD flags);

        HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )(
            ICorProfilerFunctionControl * This,
            /* [in] */ ULONG cbNewILMethodHeader,
            /* [size_is][in] */ LPCBYTE pbNewILMethodHeader);

        HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )(
            ICorProfilerFunctionControl * This,
            /* [in] */ ULONG cILMapEntries,
            /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);

        END_INTERFACE
    } ICorProfilerFunctionControlVtbl;

    interface ICorProfilerFunctionControl
    {
        CONST_VTBL struct ICorProfilerFunctionControlVtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerFunctionControl_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerFunctionControl_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerFunctionControl_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerFunctionControl_SetCodegenFlags(This,flags) \
    ( (This)->lpVtbl -> SetCodegenFlags(This,flags) )

#define ICorProfilerFunctionControl_SetILFunctionBody(This,cbNewILMethodHeader,pbNewILMethodHeader) \
    ( (This)->lpVtbl -> SetILFunctionBody(This,cbNewILMethodHeader,pbNewILMethodHeader) )

#define ICorProfilerFunctionControl_SetILInstrumentedCodeMap(This,cILMapEntries,rgILMapEntries) \
    ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,cILMapEntries,rgILMapEntries) )

#endif /* COBJMACROS */

#endif  /* C style interface */

#endif  /* __ICorProfilerFunctionControl_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo4_INTERFACE_DEFINED__
#define __ICorProfilerInfo4_INTERFACE_DEFINED__

/* interface ICorProfilerInfo4 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo4;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("0d8fdcaa-6257-47bf-b1bf-94dac88466ee")
    ICorProfilerInfo4 : public ICorProfilerInfo3
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EnumThreads(
            /* [out] */ ICorProfilerThreadEnum **ppEnum) = 0;

        virtual HRESULT STDMETHODCALLTYPE InitializeCurrentThread( void) = 0;

        virtual HRESULT STDMETHODCALLTYPE RequestReJIT(
            /* [in] */ ULONG cFunctions,
            /* [size_is][in] */ ModuleID moduleIds[ ],
            /* [size_is][in] */ mdMethodDef methodIds[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE RequestRevert(
            /* [in] */ ULONG cFunctions,
            /* [size_is][in] */ ModuleID moduleIds[ ],
            /* [size_is][in] */ mdMethodDef methodIds[ ],
            /* [size_is][out] */ HRESULT status[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetCodeInfo3(
            /* [in] */ FunctionID functionID,
            /* [in] */ ReJITID reJitId,
            /* [in] */ ULONG32 cCodeInfos,
            /* [out] */ ULONG32 *pcCodeInfos,
            /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetFunctionFromIP2(
            /* [in] */ LPCBYTE ip,
            /* [out] */ FunctionID *pFunctionId,
            /* [out] */ ReJITID *pReJitId) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetReJITIDs(
            /* [in] */ FunctionID functionId,
            /* [in] */ ULONG cReJitIds,
            /* [out] */ ULONG *pcReJitIds,
            /* [length_is][size_is][out] */ ReJITID reJitIds[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetILToNativeMapping2(
            /* [in] */ FunctionID functionId,
            /* [in] */ ReJITID reJitId,
            /* [in] */ ULONG32 cMap,
            /* [out] */ ULONG32 *pcMap,
            /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE EnumJITedFunctions2(
            /* [out] */ ICorProfilerFunctionEnum **ppEnum) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetObjectSize2(
            /* [in] */ ObjectID objectId,
            /* [out] */ SIZE_T *pcSize) = 0;

    };

#else   /* C style interface */

    typedef struct ICorProfilerInfo4Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerInfo4 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )(
            ICorProfilerInfo4 * This);

        ULONG ( STDMETHODCALLTYPE *Release )(
            ICorProfilerInfo4 * This);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )(
            ICorProfilerInfo4 * This,
            /* [in] */ ObjectID objectId,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )(
            ICorProfilerInfo4 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdTypeDef typeDef,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )(
            ICorProfilerInfo4 * This,
            /* [in] */ FunctionID functionId,
            /* [out] */ LPCBYTE
*pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo4 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo4 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo4 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo4 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo4 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo4 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( 
ICorProfilerInfo4 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo4 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo4 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo4 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo4 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo4 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo4 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo4 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo4 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ 
ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo4 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo4 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo4 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo4 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo4 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo4 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo4 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo 
*pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo4 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo4 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo4 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo4 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo4 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo4 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo4 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( 
ICorProfilerInfo4 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo4 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo4 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); END_INTERFACE } ICorProfilerInfo4Vtbl; interface ICorProfilerInfo4 { CONST_VTBL struct ICorProfilerInfo4Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo4_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo4_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo4_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo4_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo4_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo4_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo4_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo4_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo4_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo4_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo4_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo4_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo4_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo4_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo4_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo4_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo4_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo4_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo4_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo4_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo4_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define 
ICorProfilerInfo4_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo4_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo4_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo4_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo4_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo4_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo4_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo4_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo4_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo4_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo4_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo4_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo4_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo4_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo4_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo4_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo4_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo4_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo4_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo4_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo4_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( 
(This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo4_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo4_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo4_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo4_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo4_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo4_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo4_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo4_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo4_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo4_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo4_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo4_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo4_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo4_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo4_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo4_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo4_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo4_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo4_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo4_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define 
ICorProfilerInfo4_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo4_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo4_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo4_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo4_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo4_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo4_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo4_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo4_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo4_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo4_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo4_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo4_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo4_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo4_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo4_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo4_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo4_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo4_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo4_INTERFACE_DEFINED__ */ 
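/*
 * Editor's sketch (not MIDL output): a minimal example of driving the ReJIT
 * API declared by ICorProfilerInfo4 above. It assumes COBJMACROS is defined
 * so the ICorProfilerInfo4_* accessor macros from this header are available;
 * the helper name RequestSingleReJIT is hypothetical. Guarded with #if 0 so
 * it is never compiled as part of the header.
 */
#if 0
static HRESULT RequestSingleReJIT(ICorProfilerInfo4 *pInfo,
                                  ModuleID moduleId,
                                  mdMethodDef methodDef)
{
    /* RequestReJIT takes parallel arrays: moduleIds[i] pairs with methodIds[i]. */
    ModuleID moduleIds[1] = { moduleId };
    mdMethodDef methodIds[1] = { methodDef };

    /* Threads created by the profiler itself should call
       InitializeCurrentThread before using other ICorProfilerInfo4 methods
       (assumption: this helper may run on such a thread). */
    HRESULT hr = ICorProfilerInfo4_InitializeCurrentThread(pInfo);
    if (FAILED(hr))
        return hr;

    /* Expands to pInfo->lpVtbl->RequestReJIT(pInfo, 1, moduleIds, methodIds). */
    return ICorProfilerInfo4_RequestReJIT(pInfo, 1, moduleIds, methodIds);
}
#endif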
#ifndef __ICorProfilerInfo5_INTERFACE_DEFINED__
#define __ICorProfilerInfo5_INTERFACE_DEFINED__

/* interface ICorProfilerInfo5 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo5;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("07602928-CE38-4B83-81E7-74ADAF781214")
    ICorProfilerInfo5 : public ICorProfilerInfo4
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE GetEventMask2(
            /* [out] */ DWORD *pdwEventsLow,
            /* [out] */ DWORD *pdwEventsHigh) = 0;

        virtual HRESULT STDMETHODCALLTYPE SetEventMask2(
            /* [in] */ DWORD dwEventsLow,
            /* [in] */ DWORD dwEventsHigh) = 0;

    };

#else   /* C style interface */

    typedef struct ICorProfilerInfo5Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerInfo5 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )(
            ICorProfilerInfo5 * This);

        ULONG ( STDMETHODCALLTYPE *Release )(
            ICorProfilerInfo5 * This);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )(
            ICorProfilerInfo5 * This,
            /* [in] */ ObjectID objectId,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )(
            ICorProfilerInfo5 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdTypeDef typeDef,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )(
            ICorProfilerInfo5 * This,
            /* [in] */ FunctionID functionId,
            /* [out] */ LPCBYTE *pStart,
            /* [out] */ ULONG *pcSize);

        HRESULT ( STDMETHODCALLTYPE *GetEventMask )(
            ICorProfilerInfo5 * This,
            /* [out] */ DWORD *pdwEvents);

        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )(
            ICorProfilerInfo5 * This,
            /* [in] */ LPCBYTE ip,
            /* [out] */ FunctionID *pFunctionId);

        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )(
            ICorProfilerInfo5 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdToken token,
            /* [out] */ FunctionID *pFunctionId);

        HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )(
            ICorProfilerInfo5 * This,
            /* [in] */ ThreadID threadId,
            /* [out] */ HANDLE *phThread);

        HRESULT ( STDMETHODCALLTYPE *GetObjectSize )(
            ICorProfilerInfo5 * This,
            /* [in] */ ObjectID objectId,
            /* [out] */ ULONG *pcSize);

        HRESULT ( STDMETHODCALLTYPE *IsArrayClass )(
            ICorProfilerInfo5 * This,
            /* [in] */ ClassID classId,
            /* [out] */ CorElementType *pBaseElemType,
            /* [out] */ ClassID *pBaseClassId,
            /* [out] */ ULONG *pcRank);

        HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )(
            ICorProfilerInfo5 * This,
            /* [in] */ ThreadID threadId,
            /* [out] */ DWORD *pdwWin32ThreadId);

        HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )(
            ICorProfilerInfo5 * This,
            /* [out] */ ThreadID *pThreadId);

        HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )(
            ICorProfilerInfo5 * This,
            /* [in] */ ClassID classId,
            /* [out] */ ModuleID *pModuleId,
            /* [out] */ mdTypeDef *pTypeDefToken);

        HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )(
            ICorProfilerInfo5 * This,
            /* [in] */ FunctionID functionId,
            /* [out] */ ClassID *pClassId,
            /* [out] */ ModuleID *pModuleId,
            /* [out] */ mdToken *pToken);

        HRESULT ( STDMETHODCALLTYPE *SetEventMask )(
            ICorProfilerInfo5 * This,
            /* [in] */ DWORD dwEvents);

        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )(
            ICorProfilerInfo5 * This,
            /* [in] */ FunctionEnter *pFuncEnter,
            /* [in] */ FunctionLeave *pFuncLeave,
            /* [in] */ FunctionTailcall *pFuncTailcall);

        HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )(
            ICorProfilerInfo5 * This,
            /* [in] */ FunctionIDMapper *pFunc);

        HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )(
            ICorProfilerInfo5 * This,
            /* [in] */ FunctionID functionId,
            /* [in] */ REFIID riid,
            /* [out] */ IUnknown
**ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo5 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo5 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo5 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo5 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo5 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo5 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo5 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo5 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo5 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID funcId, 
/* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo5 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo5 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo5 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo5 * This, /* [in] */ ULONG 
cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo5 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo5 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo5 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo5 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo5 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo5 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo5 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo5 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) 
WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo5 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo5 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo5 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo5 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo5 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo5 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo5 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo5 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo5 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); END_INTERFACE } ICorProfilerInfo5Vtbl; interface ICorProfilerInfo5 { CONST_VTBL struct ICorProfilerInfo5Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo5_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo5_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo5_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo5_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo5_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo5_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo5_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo5_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo5_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo5_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define 
ICorProfilerInfo5_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo5_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo5_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo5_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo5_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo5_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo5_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo5_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo5_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo5_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo5_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo5_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo5_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo5_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo5_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo5_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo5_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo5_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo5_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo5_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo5_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo5_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo5_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> 
GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo5_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo5_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo5_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo5_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo5_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo5_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo5_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo5_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo5_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo5_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo5_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo5_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo5_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo5_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo5_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo5_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo5_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo5_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define 
ICorProfilerInfo5_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo5_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo5_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo5_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo5_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo5_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo5_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo5_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo5_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo5_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo5_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo5_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo5_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo5_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo5_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo5_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo5_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo5_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo5_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define 
ICorProfilerInfo5_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \
    ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) )

#define ICorProfilerInfo5_EnumThreads(This,ppEnum) \
    ( (This)->lpVtbl -> EnumThreads(This,ppEnum) )

#define ICorProfilerInfo5_InitializeCurrentThread(This) \
    ( (This)->lpVtbl -> InitializeCurrentThread(This) )

#define ICorProfilerInfo5_RequestReJIT(This,cFunctions,moduleIds,methodIds) \
    ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) )

#define ICorProfilerInfo5_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \
    ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) )

#define ICorProfilerInfo5_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) )

#define ICorProfilerInfo5_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \
    ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) )

#define ICorProfilerInfo5_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \
    ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) )

#define ICorProfilerInfo5_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) )

#define ICorProfilerInfo5_EnumJITedFunctions2(This,ppEnum) \
    ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) )

#define ICorProfilerInfo5_GetObjectSize2(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) )

#define ICorProfilerInfo5_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \
    ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) )

#define ICorProfilerInfo5_SetEventMask2(This,dwEventsLow,dwEventsHigh) \
    ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) )

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerInfo5_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo6_INTERFACE_DEFINED__
#define __ICorProfilerInfo6_INTERFACE_DEFINED__

/* interface ICorProfilerInfo6 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerInfo6;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("F30A070D-BFFB-46A7-B1D8-8781EF7B698A")
    ICorProfilerInfo6 : public ICorProfilerInfo5
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EnumNgenModuleMethodsInliningThisMethod( 
            /* [in] */ ModuleID inlinersModuleId,
            /* [in] */ ModuleID inlineeModuleId,
            /* [in] */ mdMethodDef inlineeMethodId,
            /* [out] */ BOOL *incompleteData,
            /* [out] */ ICorProfilerMethodEnum **ppEnum) = 0;

    };

#else 	/* C style interface */

    typedef struct ICorProfilerInfo6Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( 
            ICorProfilerInfo6 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */ 
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )( 
            ICorProfilerInfo6 * This);

        ULONG ( STDMETHODCALLTYPE *Release )( 
            ICorProfilerInfo6 * This);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( 
            ICorProfilerInfo6 * This,
            /* [in] */ ObjectID objectId,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( 
            ICorProfilerInfo6 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdTypeDef typeDef,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( 
            ICorProfilerInfo6 * This,
            /* [in] */ FunctionID functionId,
            /* [out] */ LPCBYTE *pStart,
            /* [out] */
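/* Editorial note (sketch, not generated text): EnumNgenModuleMethodsInliningThisMethod
   is the single method ICorProfilerInfo6 adds over ICorProfilerInfo5; incompleteData
   reports that the inlining information could not be fully enumerated. Draining the
   returned enumerator, assuming a hypothetical ICorProfilerInfo6 *pInfo6:

       BOOL incomplete = FALSE;
       ICorProfilerMethodEnum *pEnum = NULL;
       if (SUCCEEDED(pInfo6->lpVtbl->EnumNgenModuleMethodsInliningThisMethod(
               pInfo6, inlinersModuleId, inlineeModuleId, inlineeMethodId,
               &incomplete, &pEnum)))
       {
           COR_PRF_METHOD m;
           ULONG fetched = 0;
           while (pEnum->lpVtbl->Next(pEnum, 1, &m, &fetched) == S_OK)
           {
               ... m.moduleId and m.methodId identify one inliner ...
           }
           pEnum->lpVtbl->Release(pEnum);
       }
*/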
ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo6 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo6 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo6 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo6 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo6 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo6 * 
This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo6 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo6 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo6 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo6 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo6 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo6 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo6 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], 
/* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo6 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo6 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo6 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo6 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo6 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo6 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo 
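/* Editorial note (sketch, not generated text): the SetEnterLeaveFunctionHooks3*
   methods register process-wide ELT callbacks and must be called from the
   profiler's Initialize callback. Assuming hypothetical hook implementations
   MyEnter3/MyLeave3/MyTailcall3 (ELT3 hooks normally need register-preserving,
   assembly-level thunks):

       pInfo->lpVtbl->SetEnterLeaveFunctionHooks3(pInfo, MyEnter3, MyLeave3, MyTailcall3);
*/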
*pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo6 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo6 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo6 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo6 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo6 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo6 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo6 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( 
ICorProfilerInfo6 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo6 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo6 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo6 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo6 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo6 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); END_INTERFACE } ICorProfilerInfo6Vtbl; interface ICorProfilerInfo6 { CONST_VTBL struct ICorProfilerInfo6Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo6_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo6_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo6_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo6_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo6_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo6_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo6_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo6_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo6_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo6_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo6_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo6_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo6_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo6_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo6_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo6_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo6_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> 
SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo6_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo6_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo6_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo6_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo6_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo6_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo6_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo6_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo6_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo6_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo6_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo6_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo6_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo6_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo6_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo6_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo6_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo6_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo6_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo6_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> 
GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo6_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo6_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo6_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo6_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo6_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo6_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo6_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo6_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo6_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo6_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo6_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo6_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo6_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo6_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo6_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo6_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo6_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo6_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo6_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define 
ICorProfilerInfo6_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo6_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo6_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo6_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo6_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo6_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo6_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo6_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo6_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo6_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo6_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo6_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo6_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo6_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo6_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo6_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo6_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo6_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define 
ICorProfilerInfo6_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \
    ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) )

#define ICorProfilerInfo6_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) )

#define ICorProfilerInfo6_EnumJITedFunctions2(This,ppEnum) \
    ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) )

#define ICorProfilerInfo6_GetObjectSize2(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) )

#define ICorProfilerInfo6_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \
    ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) )

#define ICorProfilerInfo6_SetEventMask2(This,dwEventsLow,dwEventsHigh) \
    ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) )

#define ICorProfilerInfo6_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \
    ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) )

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerInfo6_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo7_INTERFACE_DEFINED__
#define __ICorProfilerInfo7_INTERFACE_DEFINED__

/* interface ICorProfilerInfo7 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerInfo7;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("9AEECC0D-63E0-4187-8C00-E312F503F663")
    ICorProfilerInfo7 : public ICorProfilerInfo6
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE ApplyMetaData( 
            /* [in] */ ModuleID moduleId) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetInMemorySymbolsLength( 
            /* [in] */ ModuleID moduleId,
            /* [out] */ DWORD *pCountSymbolBytes) = 0;

        virtual HRESULT STDMETHODCALLTYPE ReadInMemorySymbols( 
            /* [in] */ ModuleID moduleId,
            /* [in] */ DWORD symbolsReadOffset,
            /* [out] */ BYTE *pSymbolBytes,
            /* [in] */ DWORD countSymbolBytes,
            /* [out] */ DWORD *pCountSymbolBytesRead) = 0;

    };

#else 	/* C style interface */

    typedef struct ICorProfilerInfo7Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( 
            ICorProfilerInfo7 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */ 
            _COM_Outptr_  void **ppvObject);

        ULONG ( STDMETHODCALLTYPE *AddRef )( 
            ICorProfilerInfo7 * This);

        ULONG ( STDMETHODCALLTYPE *Release )( 
            ICorProfilerInfo7 * This);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( 
            ICorProfilerInfo7 * This,
            /* [in] */ ObjectID objectId,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( 
            ICorProfilerInfo7 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdTypeDef typeDef,
            /* [out] */ ClassID *pClassId);

        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( 
            ICorProfilerInfo7 * This,
            /* [in] */ FunctionID functionId,
            /* [out] */ LPCBYTE *pStart,
            /* [out] */ ULONG *pcSize);

        HRESULT ( STDMETHODCALLTYPE *GetEventMask )( 
            ICorProfilerInfo7 * This,
            /* [out] */ DWORD *pdwEvents);

        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( 
            ICorProfilerInfo7 * This,
            /* [in] */ LPCBYTE ip,
            /* [out] */ FunctionID *pFunctionId);

        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( 
            ICorProfilerInfo7 * This,
            /* [in] */ ModuleID moduleId,
            /* [in] */ mdToken token,
            /* [out] */ FunctionID *pFunctionId);

        HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( 
            ICorProfilerInfo7 * This,
            /* [in] */ ThreadID threadId,
            /* [out] */ HANDLE *phThread);

        HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( 
            ICorProfilerInfo7 * This,
            /* [in] */
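/* Editorial note (sketch, not generated text): ICorProfilerInfo7 adds ApplyMetaData
   plus a length/offset pair for reading in-memory symbol streams. The signatures
   suggest the usual sized-read loop; assuming a hypothetical ICorProfilerInfo7 *pInfo7:

       DWORD total = 0, offset = 0, got = 0;
       if (SUCCEEDED(pInfo7->lpVtbl->GetInMemorySymbolsLength(pInfo7, moduleId, &total)) && total != 0)
       {
           BYTE *buf = (BYTE *)malloc(total);
           while (offset < total
                  && SUCCEEDED(pInfo7->lpVtbl->ReadInMemorySymbols(
                         pInfo7, moduleId, offset, buf + offset, total - offset, &got))
                  && got != 0)
           {
               offset += got;
           }
       }
*/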
ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo7 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo7 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo7 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo7 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo7 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo7 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo7 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); 
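/* Editorial note (not part of the generated text): each ICorProfilerInfoN vtbl
   re-lists every inherited slot in declaration order, which keeps the layout
   binary-compatible with all ancestor interfaces; that is why this struct repeats
   the full ICorProfilerInfo6 surface before appending ApplyMetaData,
   GetInMemorySymbolsLength, and ReadInMemorySymbols at the end. In C, a call can
   go through the generated ICorProfilerInfo7_* COBJMACROS or directly through
   lpVtbl; the two forms below are equivalent (sketch):

       ICorProfilerInfo7_ApplyMetaData(pInfo7, moduleId);
       pInfo7->lpVtbl->ApplyMetaData(pInfo7, moduleId);
*/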
HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo7 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo7 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo7 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo7 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo7 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo7 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo7 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo7 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo7 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 
pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo7 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo7 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo7 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo7 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo7 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo7 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo7 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo7 * This, /* 
[in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo7 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo7 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo7 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo7 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo7 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo7 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo7 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo7 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo7 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo7 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo7 * This, /* 
[in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo7 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo7 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); END_INTERFACE } ICorProfilerInfo7Vtbl; interface ICorProfilerInfo7 { CONST_VTBL struct ICorProfilerInfo7Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo7_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo7_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo7_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo7_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo7_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo7_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo7_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo7_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo7_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo7_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo7_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo7_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo7_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo7_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo7_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo7_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo7_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo7_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define 
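/*
 * Editorial note, an illustrative sketch and not part of the generated
 * header: the ICorProfilerInfo7 additions declared in the vtable above are
 * ApplyMetaData, GetInMemorySymbolsLength and ReadInMemorySymbols. Reading
 * in-memory symbols follows a size-then-read pattern; with a hypothetical
 * ICorProfilerInfo7 *pInfo7:
 *
 *   DWORD cb = 0;
 *   if (SUCCEEDED(pInfo7->GetInMemorySymbolsLength(moduleId, &cb)) && cb != 0)
 *   {
 *       BYTE *buf = new BYTE[cb];
 *       DWORD cbRead = 0;
 *       pInfo7->ReadInMemorySymbols(moduleId, 0, buf, cb, &cbRead);
 *       // hand buf / cbRead to a symbol reader, then delete[] buf
 *   }
 */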
ICorProfilerInfo7_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo7_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo7_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo7_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo7_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo7_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo7_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo7_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo7_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo7_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo7_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo7_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo7_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo7_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo7_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo7_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo7_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo7_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo7_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo7_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> 
GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo7_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo7_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo7_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo7_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo7_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo7_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo7_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo7_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo7_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo7_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo7_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo7_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo7_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo7_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo7_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo7_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo7_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo7_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo7_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo7_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> 
SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo7_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo7_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo7_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo7_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo7_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo7_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo7_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo7_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo7_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo7_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo7_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo7_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo7_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo7_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo7_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo7_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo7_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo7_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> 
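/*
 * Editorial note, illustrative only: the ICorProfilerInfo7_* macros in this
 * COBJMACROS block are thin forwarders for C callers, so with a C-style
 * ICorProfilerInfo7 *pInfo the two calls below are equivalent:
 *
 *   ICorProfilerInfo7_ForceGC(pInfo);    // macro form
 *   pInfo->lpVtbl->ForceGC(pInfo);       // what the macro expands to
 */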
GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo7_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo7_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo7_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo7_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo7_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo7_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo7_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo7_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo7_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo8_INTERFACE_DEFINED__ #define __ICorProfilerInfo8_INTERFACE_DEFINED__ /* interface ICorProfilerInfo8 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo8; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("C5AC80A6-782E-4716-8044-39598C60CFBF") ICorProfilerInfo8 : public ICorProfilerInfo7 { public: virtual HRESULT STDMETHODCALLTYPE IsFunctionDynamic( /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic) = 0; virtual HRESULT STDMETHODCALLTYPE GetFunctionFromIP3( /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId) = 0; virtual HRESULT STDMETHODCALLTYPE GetDynamicFunctionInfo( /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo8Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo8 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo8 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo8 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo8 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo8 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE 
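/*
 * Editorial note, an illustrative sketch: ICorProfilerInfo8, declared just
 * above, adds IsFunctionDynamic, GetFunctionFromIP3 and
 * GetDynamicFunctionInfo so a profiler can handle functions that carry no
 * metadata token (dynamic methods such as IL stubs). With a hypothetical
 * ICorProfilerInfo8 *pInfo8:
 *
 *   BOOL isDynamic = FALSE;
 *   if (SUCCEEDED(pInfo8->IsFunctionDynamic(functionId, &isDynamic)) && isDynamic)
 *   {
 *       // query GetDynamicFunctionInfo instead of GetFunctionInfo
 *   }
 */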
*GetFunctionFromToken )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo8 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo8 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo8 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo8 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE 
*SetFunctionReJIT )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo8 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo8 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo8 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo8 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo8 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo8 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo8 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID 
typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo8 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo8 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo8 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo8 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo8 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ 
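/*
 * Editorial note, illustrative only: the cchName / pcchName / szName
 * parameter triples used by GetAppDomainInfo, GetModuleInfo2 and similar
 * methods follow the usual two-call convention: pass a zero-length buffer
 * to learn the required size, then call again with a real buffer. With a
 * hypothetical ICorProfilerInfo8 *pInfo8 (NULL for the unused
 * out-parameters is assumed to be accepted here):
 *
 *   ULONG cch = 0;
 *   pInfo8->GetModuleInfo2(moduleId, NULL, 0, &cch, NULL, NULL, NULL);
 *   WCHAR *name = new WCHAR[cch];
 *   pInfo8->GetModuleInfo2(moduleId, NULL, cch, &cch, name, NULL, NULL);
 */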
COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo8 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo8 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo8 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo8 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo8 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo8 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo8 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo8 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo8 * This, /* 
[out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo8 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo8 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo8 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo8 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo8 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo8 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo8 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); END_INTERFACE } ICorProfilerInfo8Vtbl; interface ICorProfilerInfo8 { CONST_VTBL struct ICorProfilerInfo8Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo8_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo8_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo8_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo8_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo8_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo8_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo8_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo8_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo8_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo8_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo8_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo8_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define 
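/*
 * Editorial note: GetObjectSize2, declared above, differs from the original
 * GetObjectSize only in returning the size through a SIZE_T instead of a
 * ULONG, which matters for very large objects on 64-bit targets.
 * Illustrative use with a hypothetical pInfo8:
 *
 *   SIZE_T size = 0;
 *   pInfo8->GetObjectSize2(objectId, &size);
 */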
ICorProfilerInfo8_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo8_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo8_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo8_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo8_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo8_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo8_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo8_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo8_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo8_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo8_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo8_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo8_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo8_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo8_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo8_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo8_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo8_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo8_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo8_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo8_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo8_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> 
EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo8_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo8_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo8_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo8_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo8_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo8_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo8_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo8_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo8_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo8_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo8_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo8_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo8_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo8_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo8_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo8_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo8_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define 
ICorProfilerInfo8_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo8_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo8_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo8_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo8_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo8_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo8_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo8_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo8_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo8_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo8_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo8_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo8_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo8_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo8_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo8_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo8_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo8_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo8_InitializeCurrentThread(This) \ ( (This)->lpVtbl 
-> InitializeCurrentThread(This) ) #define ICorProfilerInfo8_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo8_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo8_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo8_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo8_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo8_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo8_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo8_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo8_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo8_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo8_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo8_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo8_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo8_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #define ICorProfilerInfo8_IsFunctionDynamic(This,functionId,isDynamic) \ ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) ) #define ICorProfilerInfo8_GetFunctionFromIP3(This,ip,functionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) ) #define ICorProfilerInfo8_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \ ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo8_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo9_INTERFACE_DEFINED__ #define __ICorProfilerInfo9_INTERFACE_DEFINED__ /* interface ICorProfilerInfo9 */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerInfo9; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("008170DB-F8CC-4796-9A51-DC8AA0B47012") ICorProfilerInfo9 : public ICorProfilerInfo8 { public: virtual HRESULT STDMETHODCALLTYPE GetNativeCodeStartAddresses( FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]) 
= 0; virtual HRESULT STDMETHODCALLTYPE GetILToNativeMapping3( UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]) = 0; virtual HRESULT STDMETHODCALLTYPE GetCodeInfo4( UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]) = 0; }; #else /* C style interface */ typedef struct ICorProfilerInfo9Vtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo9 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo9 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo9 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo9 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo9 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo9 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo9 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ 
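/*
 * Editorial note, an illustrative sketch: ICorProfilerInfo9 keys its new
 * queries off a native code start address instead of a FunctionID, so a
 * ReJIT-aware profiler first enumerates the native bodies of a function
 * and then maps each one. With a hypothetical ICorProfilerInfo9 *pInfo9,
 * and assuming a zero-sized first call returns the needed count:
 *
 *   ULONG32 cAddrs = 0;
 *   pInfo9->GetNativeCodeStartAddresses(functionId, reJitId, 0, &cAddrs, NULL);
 *   UINT_PTR *addrs = new UINT_PTR[cAddrs];
 *   pInfo9->GetNativeCodeStartAddresses(functionId, reJitId, cAddrs, &cAddrs, addrs);
 *   // then per body: GetILToNativeMapping3(addrs[i], ...) and GetCodeInfo4(addrs[i], ...)
 */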
_Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo9 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo9 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo9 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo9 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo9 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo9 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo9 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE 
*GetStringLayout )( ICorProfilerInfo9 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo9 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo9 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo9 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ 
COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo9 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo9 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo9 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo9 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE 
*InitializeCurrentThread )( ICorProfilerInfo9 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo9 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo9 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo9 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo9 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo9 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo9 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo9 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo9 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo9 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo9 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo9 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo9 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( 
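// ------------------------------------------------------------------------
// [Editorial sketch — not part of the generated header; kept in comment form
// so the surrounding declarations are unaffected.] The ICorProfilerInfo9
// additions around this point (GetNativeCodeStartAddresses, then
// GetILToNativeMapping3 / GetCodeInfo4 just below) expose every native code
// body of a function by start address, since tiered compilation can produce
// several per FunctionID. A minimal C++ usage sketch, assuming the usual
// probe-then-fill contract of the cXxx/pcXxx buffer parameters used
// throughout this header:
//
//   HRESULT DumpIlToNativeMaps(ICorProfilerInfo9 *pInfo, FunctionID fid, ReJITID rejitId)
//   {
//       ULONG32 cStarts = 0;  // first call probes how many code bodies exist
//       HRESULT hr = pInfo->GetNativeCodeStartAddresses(fid, rejitId, 0, &cStarts, NULL);
//       if (FAILED(hr)) return hr;
//       UINT_PTR starts[8];   // sketch: small fixed cap instead of a heap buffer
//       if (cStarts > 8) cStarts = 8;
//       hr = pInfo->GetNativeCodeStartAddresses(fid, rejitId, cStarts, &cStarts, starts);
//       if (FAILED(hr)) return hr;
//       for (ULONG32 i = 0; i < cStarts; i++)
//       {
//           COR_DEBUG_IL_TO_NATIVE_MAP map[64];
//           ULONG32 cMap = 0;  // per-code-body IL<->native map, keyed by start address
//           hr = pInfo->GetILToNativeMapping3(starts[i], 64, &cMap, map);
//           if (FAILED(hr)) return hr;
//       }
//       return S_OK;
//   }
// ------------------------------------------------------------------------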
STDMETHODCALLTYPE *GetILToNativeMapping3 )( ICorProfilerInfo9 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )( ICorProfilerInfo9 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]); END_INTERFACE } ICorProfilerInfo9Vtbl; interface ICorProfilerInfo9 { CONST_VTBL struct ICorProfilerInfo9Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo9_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo9_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo9_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo9_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo9_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo9_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo9_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo9_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo9_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo9_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo9_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo9_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo9_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo9_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo9_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo9_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo9_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo9_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo9_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo9_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo9_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo9_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> 
GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo9_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo9_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo9_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo9_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo9_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo9_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo9_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo9_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo9_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo9_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo9_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo9_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo9_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo9_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo9_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo9_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo9_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo9_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo9_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo9_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> 
GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo9_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo9_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo9_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo9_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo9_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo9_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo9_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo9_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo9_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo9_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo9_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo9_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo9_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo9_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo9_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo9_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo9_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo9_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo9_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo9_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define 
ICorProfilerInfo9_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo9_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo9_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo9_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo9_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo9_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo9_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo9_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo9_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo9_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo9_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo9_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo9_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo9_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo9_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo9_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo9_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo9_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo9_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo9_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> 
GetEventMask2(This,pdwEventsLow,pdwEventsHigh) )

#define ICorProfilerInfo9_SetEventMask2(This,dwEventsLow,dwEventsHigh) \
    ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) )

#define ICorProfilerInfo9_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \
    ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) )

#define ICorProfilerInfo9_ApplyMetaData(This,moduleId) \
    ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) )

#define ICorProfilerInfo9_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \
    ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) )

#define ICorProfilerInfo9_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \
    ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) )

#define ICorProfilerInfo9_IsFunctionDynamic(This,functionId,isDynamic) \
    ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) )

#define ICorProfilerInfo9_GetFunctionFromIP3(This,ip,functionId,pReJitId) \
    ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) )

#define ICorProfilerInfo9_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \
    ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) )

#define ICorProfilerInfo9_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \
    ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) )

#define ICorProfilerInfo9_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \
    ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) )

#define ICorProfilerInfo9_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) \
    ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) )

#endif /* COBJMACROS */

#endif /* C style interface */

#endif /* __ICorProfilerInfo9_INTERFACE_DEFINED__ */

#ifndef __ICorProfilerInfo10_INTERFACE_DEFINED__
#define __ICorProfilerInfo10_INTERFACE_DEFINED__

/* interface ICorProfilerInfo10 */
/* [local][unique][uuid][object] */

EXTERN_C const IID IID_ICorProfilerInfo10;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("2F1B5152-C869-40C9-AA5F-3ABE026BD720")
    ICorProfilerInfo10 : public ICorProfilerInfo9
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EnumerateObjectReferences(
            ObjectID objectId,
            ObjectReferenceCallback callback,
            void *clientData) = 0;

        virtual HRESULT STDMETHODCALLTYPE IsFrozenObject(
            ObjectID objectId,
            BOOL *pbFrozen) = 0;

        virtual HRESULT STDMETHODCALLTYPE GetLOHObjectSizeThreshold(
            DWORD *pThreshold) = 0;

        virtual HRESULT STDMETHODCALLTYPE RequestReJITWithInliners(
            /* [in] */ DWORD dwRejitFlags,
            /* [in] */ ULONG cFunctions,
            /* [size_is][in] */ ModuleID moduleIds[ ],
            /* [size_is][in] */ mdMethodDef methodIds[ ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE SuspendRuntime( void) = 0;

        virtual HRESULT STDMETHODCALLTYPE ResumeRuntime( void) = 0;

    };

#else /* C style interface */

    typedef struct ICorProfilerInfo10Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
            ICorProfilerInfo10 * This,
            /* [in] */ REFIID riid,
            /* [annotation][iid_is][out] */
            _COM_Outptr_ void
**ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo10 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo10 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo10 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo10 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo10 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo10 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( 
ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo10 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo10 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo10 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo10 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo10 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo10 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo10 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo10 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo10 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID 
*pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo10 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo10 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo10 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionIDMapper2 *pFunc, 
/* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo10 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo10 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo10 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo10 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo10 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo10 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo10 
* This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo10 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo10 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo10 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo10 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo10 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo10 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo10 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo10 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo10 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo10 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping3 )( ICorProfilerInfo10 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )( ICorProfilerInfo10 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *EnumerateObjectReferences )( ICorProfilerInfo10 * This, ObjectID objectId, ObjectReferenceCallback callback, void *clientData); HRESULT ( 
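// ------------------------------------------------------------------------
// [Editorial sketch — not part of the generated header; kept in comment form
// so the surrounding declarations are unaffected.] EnumerateObjectReferences,
// declared just above, lets a profiler walk an object's outgoing references
// on demand instead of having them buffered into a callback. A minimal C++
// sketch; it assumes the ObjectReferenceCallback typedef declared earlier in
// this header (BOOL return, with TRUE taken to mean "keep enumerating") and
// that the call is made while the heap is stable, e.g. from a GC callback:
//
//   static BOOL STDMETHODCALLTYPE CountOneReference(ObjectID root, ObjectID ref, void *clientData)
//   {
//       (void)root; (void)ref;
//       ++*(SIZE_T *)clientData;   // tally one outgoing reference
//       return TRUE;               // continue enumeration
//   }
//
//   HRESULT CountReferences(ICorProfilerInfo10 *pInfo, ObjectID obj, SIZE_T *pCount)
//   {
//       *pCount = 0;
//       return pInfo->EnumerateObjectReferences(obj, CountOneReference, pCount);
//   }
// ------------------------------------------------------------------------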
STDMETHODCALLTYPE *IsFrozenObject )(
            ICorProfilerInfo10 * This,
            ObjectID objectId,
            BOOL *pbFrozen);

        HRESULT ( STDMETHODCALLTYPE *GetLOHObjectSizeThreshold )(
            ICorProfilerInfo10 * This,
            DWORD *pThreshold);

        HRESULT ( STDMETHODCALLTYPE *RequestReJITWithInliners )(
            ICorProfilerInfo10 * This,
            /* [in] */ DWORD dwRejitFlags,
            /* [in] */ ULONG cFunctions,
            /* [size_is][in] */ ModuleID moduleIds[ ],
            /* [size_is][in] */ mdMethodDef methodIds[ ]);

        HRESULT ( STDMETHODCALLTYPE *SuspendRuntime )(
            ICorProfilerInfo10 * This);

        HRESULT ( STDMETHODCALLTYPE *ResumeRuntime )(
            ICorProfilerInfo10 * This);

        END_INTERFACE
    } ICorProfilerInfo10Vtbl;

    interface ICorProfilerInfo10
    {
        CONST_VTBL struct ICorProfilerInfo10Vtbl *lpVtbl;
    };

#ifdef COBJMACROS

#define ICorProfilerInfo10_QueryInterface(This,riid,ppvObject) \
    ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )

#define ICorProfilerInfo10_AddRef(This) \
    ( (This)->lpVtbl -> AddRef(This) )

#define ICorProfilerInfo10_Release(This) \
    ( (This)->lpVtbl -> Release(This) )

#define ICorProfilerInfo10_GetClassFromObject(This,objectId,pClassId) \
    ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )

#define ICorProfilerInfo10_GetClassFromToken(This,moduleId,typeDef,pClassId) \
    ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )

#define ICorProfilerInfo10_GetCodeInfo(This,functionId,pStart,pcSize) \
    ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )

#define ICorProfilerInfo10_GetEventMask(This,pdwEvents) \
    ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )

#define ICorProfilerInfo10_GetFunctionFromIP(This,ip,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )

#define ICorProfilerInfo10_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
    ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )

#define ICorProfilerInfo10_GetHandleFromThread(This,threadId,phThread) \
    ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )

#define ICorProfilerInfo10_GetObjectSize(This,objectId,pcSize) \
    ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )

#define ICorProfilerInfo10_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
    ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )

#define ICorProfilerInfo10_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
    ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )

#define ICorProfilerInfo10_GetCurrentThreadID(This,pThreadId) \
    ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )

#define ICorProfilerInfo10_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
    ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )

#define ICorProfilerInfo10_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
    ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )

#define ICorProfilerInfo10_SetEventMask(This,dwEvents) \
    ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )

#define ICorProfilerInfo10_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
    ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )

#define ICorProfilerInfo10_SetFunctionIDMapper(This,pFunc) \
    ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )

#define ICorProfilerInfo10_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
    ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )

#define
ICorProfilerInfo10_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo10_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo10_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo10_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo10_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo10_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo10_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo10_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo10_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo10_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo10_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo10_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo10_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo10_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo10_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo10_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo10_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo10_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo10_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo10_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo10_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( 
(This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo10_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo10_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo10_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo10_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo10_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo10_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo10_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo10_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo10_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo10_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo10_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo10_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo10_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo10_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo10_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo10_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo10_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo10_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo10_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo10_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> 
GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo10_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo10_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo10_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo10_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo10_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo10_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo10_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo10_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo10_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo10_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo10_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo10_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo10_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo10_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo10_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo10_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo10_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo10_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo10_EnumJITedFunctions2(This,ppEnum) \ ( 
(This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo10_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo10_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo10_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo10_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo10_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo10_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo10_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #define ICorProfilerInfo10_IsFunctionDynamic(This,functionId,isDynamic) \ ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) ) #define ICorProfilerInfo10_GetFunctionFromIP3(This,ip,functionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) ) #define ICorProfilerInfo10_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \ ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) ) #define ICorProfilerInfo10_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \ ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) ) #define ICorProfilerInfo10_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) ) #define ICorProfilerInfo10_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo10_EnumerateObjectReferences(This,objectId,callback,clientData) \ ( (This)->lpVtbl -> EnumerateObjectReferences(This,objectId,callback,clientData) ) #define ICorProfilerInfo10_IsFrozenObject(This,objectId,pbFrozen) \ ( (This)->lpVtbl -> IsFrozenObject(This,objectId,pbFrozen) ) #define ICorProfilerInfo10_GetLOHObjectSizeThreshold(This,pThreshold) \ ( (This)->lpVtbl -> GetLOHObjectSizeThreshold(This,pThreshold) ) #define ICorProfilerInfo10_RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo10_SuspendRuntime(This) \ ( (This)->lpVtbl -> SuspendRuntime(This) ) #define ICorProfilerInfo10_ResumeRuntime(This) \ ( (This)->lpVtbl -> ResumeRuntime(This) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo10_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerInfo11_INTERFACE_DEFINED__ #define __ICorProfilerInfo11_INTERFACE_DEFINED__ /* interface ICorProfilerInfo11 */ /* 
[local][unique][uuid][object] */


EXTERN_C const IID IID_ICorProfilerInfo11;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("06398876-8987-4154-B621-40A00D6E4D04")
    ICorProfilerInfo11 : public ICorProfilerInfo10
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE GetEnvironmentVariable(
            /* [string][in] */ const WCHAR *szName,
            /* [in] */ ULONG cchValue,
            /* [out] */ ULONG *pcchValue,
            /* [annotation][out] */
            _Out_writes_to_(cchValue, *pcchValue)  WCHAR szValue[  ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE SetEnvironmentVariable(
            /* [string][in] */ const WCHAR *szName,
            /* [string][in] */ const WCHAR *szValue) = 0;

    };


#else 	/* C style interface */

    typedef struct ICorProfilerInfo11Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerInfo11 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject);
        ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo11 * This);
        ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo11 * This);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId);
        HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo11 * This, /* [out] */ DWORD *pdwEvents);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo11 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId);
        HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread);
        HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize);
        HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank);
        HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo11 * This, /* [out] */ ThreadID *pThreadId);
        HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken);
        HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken);
        HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwEvents);
        HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall);
        HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo11 * This, /* [in] */ FunctionIDMapper *pFunc);
        HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */
REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE *ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo11 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo11 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo11 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo11 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo11 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo11 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( 
ICorProfilerInfo11 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo11 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo11 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo11 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE 
*GetGenerationBounds )( ICorProfilerInfo11 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo11 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo11 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo11 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo11 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] 
*/ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo11 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo11 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo11 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo11 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo11 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo11 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo11 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo11 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo11 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo11 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* 
[out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo11 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping3 )( ICorProfilerInfo11 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )( ICorProfilerInfo11 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *EnumerateObjectReferences )( ICorProfilerInfo11 * This, ObjectID objectId, ObjectReferenceCallback callback, void *clientData); HRESULT ( STDMETHODCALLTYPE *IsFrozenObject )( ICorProfilerInfo11 * This, ObjectID objectId, BOOL *pbFrozen); HRESULT ( STDMETHODCALLTYPE *GetLOHObjectSizeThreshold )( ICorProfilerInfo11 * This, DWORD *pThreshold); HRESULT ( STDMETHODCALLTYPE *RequestReJITWithInliners )( ICorProfilerInfo11 * This, /* [in] */ DWORD dwRejitFlags, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *SuspendRuntime )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *ResumeRuntime )( ICorProfilerInfo11 * This); HRESULT ( STDMETHODCALLTYPE *GetEnvironmentVariable )( ICorProfilerInfo11 * This, /* [string][in] */ const WCHAR *szName, /* [in] */ ULONG cchValue, /* [out] */ ULONG *pcchValue, /* [annotation][out] */ _Out_writes_to_(cchValue, *pcchValue) WCHAR szValue[ ]); HRESULT ( STDMETHODCALLTYPE *SetEnvironmentVariable )( ICorProfilerInfo11 * This, /* [string][in] */ const WCHAR *szName, /* [string][in] */ const WCHAR *szValue); END_INTERFACE } ICorProfilerInfo11Vtbl; interface ICorProfilerInfo11 { CONST_VTBL struct ICorProfilerInfo11Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo11_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo11_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo11_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo11_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo11_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo11_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo11_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo11_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo11_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo11_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo11_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo11_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) 
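/*
 * Illustrative sketch only -- not part of the generated header. It shows how a
 * C profiler might reach the ICorProfilerInfo11 additions through the
 * COBJMACROS wrappers in this block. The starting pointer `pInfo` (an
 * ICorProfilerInfo* obtained earlier, e.g. in Initialize), the placeholder
 * string `wszName`, and the buffer size are assumptions of this sketch, not
 * guarantees made by the header.
 *
 *   ICorProfilerInfo11 *pInfo11 = NULL;
 *   HRESULT hr = ICorProfilerInfo_QueryInterface(
 *       pInfo, &IID_ICorProfilerInfo11, (void **)&pInfo11);
 *   if (SUCCEEDED(hr))
 *   {
 *       WCHAR value[256];
 *       ULONG cchWritten = 0;
 *       // wszName: a WCHAR environment-variable name (placeholder).
 *       // cchValue is a character count; cchWritten reports what was needed.
 *       hr = ICorProfilerInfo11_GetEnvironmentVariable(
 *           pInfo11, wszName, 256, &cchWritten, value);
 *       ICorProfilerInfo11_Release(pInfo11);
 *   }
 */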
#define ICorProfilerInfo11_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo11_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo11_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo11_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo11_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo11_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo11_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo11_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo11_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo11_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo11_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo11_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo11_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo11_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define ICorProfilerInfo11_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo11_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo11_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo11_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo11_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo11_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo11_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define 
ICorProfilerInfo11_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo11_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo11_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo11_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo11_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo11_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo11_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo11_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo11_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo11_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo11_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo11_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo11_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo11_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo11_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo11_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo11_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo11_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( 
(This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo11_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo11_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo11_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo11_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo11_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo11_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo11_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo11_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo11_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo11_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo11_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo11_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo11_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define ICorProfilerInfo11_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo11_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo11_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo11_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo11_EnumThreads(This,ppEnum) \ ( 
(This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo11_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo11_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo11_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo11_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo11_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo11_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo11_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo11_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo11_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo11_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo11_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo11_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo11_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo11_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo11_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #define ICorProfilerInfo11_IsFunctionDynamic(This,functionId,isDynamic) \ ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) ) #define ICorProfilerInfo11_GetFunctionFromIP3(This,ip,functionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) ) #define ICorProfilerInfo11_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \ ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) ) #define ICorProfilerInfo11_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \ ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) ) #define ICorProfilerInfo11_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) ) #define 
ICorProfilerInfo11_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos)	\
    ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) ) 

#define ICorProfilerInfo11_EnumerateObjectReferences(This,objectId,callback,clientData)	\
    ( (This)->lpVtbl -> EnumerateObjectReferences(This,objectId,callback,clientData) ) 

#define ICorProfilerInfo11_IsFrozenObject(This,objectId,pbFrozen)	\
    ( (This)->lpVtbl -> IsFrozenObject(This,objectId,pbFrozen) ) 

#define ICorProfilerInfo11_GetLOHObjectSizeThreshold(This,pThreshold)	\
    ( (This)->lpVtbl -> GetLOHObjectSizeThreshold(This,pThreshold) ) 

#define ICorProfilerInfo11_RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds)	\
    ( (This)->lpVtbl -> RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) ) 

#define ICorProfilerInfo11_SuspendRuntime(This)	\
    ( (This)->lpVtbl -> SuspendRuntime(This) ) 

#define ICorProfilerInfo11_ResumeRuntime(This)	\
    ( (This)->lpVtbl -> ResumeRuntime(This) ) 

#define ICorProfilerInfo11_GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue)	\
    ( (This)->lpVtbl -> GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue) ) 

#define ICorProfilerInfo11_SetEnvironmentVariable(This,szName,szValue)	\
    ( (This)->lpVtbl -> SetEnvironmentVariable(This,szName,szValue) ) 

#endif /* COBJMACROS */

#endif 	/* C style interface */

#endif 	/* __ICorProfilerInfo11_INTERFACE_DEFINED__ */


#ifndef __ICorProfilerInfo12_INTERFACE_DEFINED__
#define __ICorProfilerInfo12_INTERFACE_DEFINED__

/* interface ICorProfilerInfo12 */
/* [local][unique][uuid][object] */ 

EXTERN_C const IID IID_ICorProfilerInfo12;

#if defined(__cplusplus) && !defined(CINTERFACE)

    MIDL_INTERFACE("27b24ccd-1cb1-47c5-96ee-98190dc30959")
    ICorProfilerInfo12 : public ICorProfilerInfo11
    {
    public:
        virtual HRESULT STDMETHODCALLTYPE EventPipeStartSession(
            /* [in] */ UINT32 cProviderConfigs,
            /* [size_is][in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG pProviderConfigs[  ],
            /* [in] */ BOOL requestRundown,
            /* [out] */ EVENTPIPE_SESSION *pSession) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeAddProviderToSession(
            /* [in] */ EVENTPIPE_SESSION session,
            /* [in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG providerConfig) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeStopSession(
            /* [in] */ EVENTPIPE_SESSION session) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeCreateProvider(
            /* [string][in] */ const WCHAR *providerName,
            /* [out] */ EVENTPIPE_PROVIDER *pProvider) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeGetProviderInfo(
            /* [in] */ EVENTPIPE_PROVIDER provider,
            /* [in] */ ULONG cchName,
            /* [out] */ ULONG *pcchName,
            /* [annotation][out] */
            _Out_writes_to_(cchName, *pcchName)  WCHAR providerName[  ]) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeDefineEvent(
            /* [in] */ EVENTPIPE_PROVIDER provider,
            /* [string][in] */ const WCHAR *eventName,
            /* [in] */ UINT32 eventID,
            /* [in] */ UINT64 keywords,
            /* [in] */ UINT32 eventVersion,
            /* [in] */ UINT32 level,
            /* [in] */ UINT8 opcode,
            /* [in] */ BOOL needStack,
            /* [in] */ UINT32 cParamDescs,
            /* [size_is][in] */ COR_PRF_EVENTPIPE_PARAM_DESC pParamDescs[  ],
            /* [out] */ EVENTPIPE_EVENT *pEvent) = 0;

        virtual HRESULT STDMETHODCALLTYPE EventPipeWriteEvent(
            /* [in] */ EVENTPIPE_EVENT event,
            /* [in] */ UINT32 cData,
            /* [size_is][in] */ COR_PRF_EVENT_DATA data[  ],
            /* [in] */ LPCGUID pActivityId,
            /* [in] */ LPCGUID pRelatedActivityId) = 0;

    };


#else 	/* C style interface */

    typedef struct ICorProfilerInfo12Vtbl
    {
        BEGIN_INTERFACE

        HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
ICorProfilerInfo12 * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerInfo12 * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdTypeDef typeDef, /* [out] */ ClassID *pClassId); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ LPCBYTE *pStart, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask )( ICorProfilerInfo12 * This, /* [out] */ DWORD *pdwEvents); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )( ICorProfilerInfo12 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdToken token, /* [out] */ FunctionID *pFunctionId); HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ HANDLE *phThread); HRESULT ( STDMETHODCALLTYPE *GetObjectSize )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ ULONG *pcSize); HRESULT ( STDMETHODCALLTYPE *IsArrayClass )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ CorElementType *pBaseElemType, /* [out] */ ClassID *pBaseClassId, /* [out] */ ULONG *pcRank); HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ DWORD *pdwWin32ThreadId); HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )( ICorProfilerInfo12 * This, /* [out] */ ThreadID *pThreadId); HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *SetEventMask )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwEvents); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter *pFuncEnter, /* [in] */ FunctionLeave *pFuncLeave, /* [in] */ FunctionTailcall *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )( ICorProfilerInfo12 * This, /* [in] */ FunctionIDMapper *pFunc); HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppImport, /* [out] */ mdToken *pToken); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId); HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD dwOpenFlags, /* [in] */ REFIID riid, /* [out] */ IUnknown **ppOut); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodId, /* [out] */ LPCBYTE 
*ppMethodHeader, /* [out] */ ULONG *pcbMethodSize); HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ IMethodMalloc **ppMalloc); HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ mdMethodDef methodid, /* [in] */ LPCBYTE pbNewILMethodHeader); HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )( ICorProfilerInfo12 * This, /* [in] */ AppDomainID appDomainId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ ProcessID *pProcessId); HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )( ICorProfilerInfo12 * This, /* [in] */ AssemblyID assemblyId, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AppDomainID *pAppDomainId, /* [out] */ ModuleID *pModuleId); HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId); HRESULT ( STDMETHODCALLTYPE *ForceGC )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ BOOL fStartJit, /* [in] */ ULONG cILMapEntries, /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )( ICorProfilerInfo12 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )( ICorProfilerInfo12 * This, /* [out] */ IUnknown **ppicd); HRESULT ( STDMETHODCALLTYPE *GetThreadContext )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ ContextID *pContextId); HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )( ICorProfilerInfo12 * This, /* [in] */ BOOL fThisThreadOnly, /* [out] */ DWORD *pdwProfilerContext); HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwProfilerContext); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )( ICorProfilerInfo12 * This, /* [in] */ ThreadID thread, /* [in] */ StackSnapshotCallback *callback, /* [in] */ ULONG32 infoFlags, /* [in] */ void *clientData, /* [size_is][in] */ BYTE context[ ], /* [in] */ ULONG32 contextSize); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter2 *pFuncEnter, /* [in] */ FunctionLeave2 *pFuncLeave, /* [in] */ FunctionTailcall2 *pFuncTailcall); HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID funcId, /* [in] */ COR_PRF_FRAME_INFO frameInfo, /* [out] */ ClassID *pClassId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdToken *pToken, /* [in] */ ULONG32 cTypeArgs, /* [out] */ ULONG32 *pcTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetStringLayout )( ICorProfilerInfo12 * This, /* [out] */ ULONG *pBufferLengthOffset, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetClassLayout )( ICorProfilerInfo12 * This, /* [in] */ ClassID classID, /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ], /* [in] */ ULONG cFieldOffset, /* [out] */ ULONG *pcFieldOffset, /* [out] */ ULONG *pulClassSize); HRESULT ( 
STDMETHODCALLTYPE *GetClassIDInfo2 )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ ModuleID *pModuleId, /* [out] */ mdTypeDef *pTypeDefToken, /* [out] */ ClassID *pParentClassId, /* [in] */ ULONG32 cNumTypeArgs, /* [out] */ ULONG32 *pcNumTypeArgs, /* [out] */ ClassID typeArgs[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionID, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdTypeDef typeDef, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ ClassID *pClassID); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleID, /* [in] */ mdMethodDef funcDef, /* [in] */ ClassID classId, /* [in] */ ULONG32 cTypeArgs, /* [size_is][in] */ ClassID typeArgs[ ], /* [out] */ FunctionID *pFunctionID); HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleID, /* [out] */ ICorProfilerObjectEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [in] */ ULONG32 cDimensions, /* [size_is][out] */ ULONG32 pDimensionSizes[ ], /* [size_is][out] */ int pDimensionLowerBounds[ ], /* [out] */ BYTE **ppData); HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [out] */ ULONG32 *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )( ICorProfilerInfo12 * This, /* [in] */ ThreadID threadId, /* [out] */ AppDomainID *pAppDomainId); HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ ContextID contextId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo); HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )( ICorProfilerInfo12 * This, /* [in] */ ULONG cObjectRanges, /* [out] */ ULONG *pcObjectRanges, /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]); HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ COR_PRF_GC_GENERATION_RANGE *range); HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )( ICorProfilerInfo12 * This, /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwExpectedCompletionMilliseconds); 
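/*
 * Illustrative sketch only -- not generated code. It shows one plausible way a
 * C profiler could emit a custom EventPipe event through the EventPipe members
 * declared later in this vtable (EventPipeCreateProvider, EventPipeDefineEvent,
 * EventPipeWriteEvent). `pInfo12`, `wszProvider`, and `wszEvent` are
 * placeholder names, and the literal argument values are assumptions of the
 * sketch, not requirements of this header.
 *
 *   EVENTPIPE_PROVIDER provider;
 *   EVENTPIPE_EVENT event;
 *   HRESULT hr = pInfo12->lpVtbl->EventPipeCreateProvider(
 *       pInfo12, wszProvider, &provider);
 *   if (SUCCEEDED(hr))
 *   {
 *       // eventID=1, keywords=0, eventVersion=1, level=4, opcode=0,
 *       // needStack=FALSE, and no parameter descriptors (0, NULL).
 *       hr = pInfo12->lpVtbl->EventPipeDefineEvent(
 *           pInfo12, wszEvent, 1, 0, 1, 4, 0, FALSE, 0, NULL, &event);
 *   }
 *   if (SUCCEEDED(hr))
 *   {
 *       // Minimal write: no payload data and no activity IDs.
 *       hr = pInfo12->lpVtbl->EventPipeWriteEvent(
 *           pInfo12, event, 0, NULL, NULL, NULL);
 *   }
 */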
HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionIDMapper2 *pFunc, /* [in] */ void *clientData); HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )( ICorProfilerInfo12 * This, /* [out] */ ULONG *pStringLengthOffset, /* [out] */ ULONG *pBufferOffset); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter3 *pFuncEnter3, /* [in] */ FunctionLeave3 *pFuncLeave3, /* [in] */ FunctionTailcall3 *pFuncTailcall3); HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo, /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo, /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out][in] */ ULONG *pcbArgumentInfo, /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo); HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo, /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange); HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ COR_PRF_ELT_INFO eltInfo, /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo); HRESULT ( STDMETHODCALLTYPE *EnumModules )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerModuleEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )( ICorProfilerInfo12 * This, /* [out] */ USHORT *pClrInstanceId, /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType, /* [out] */ USHORT *pMajorVersion, /* [out] */ USHORT *pMinorVersion, /* [out] */ USHORT *pBuildNumber, /* [out] */ USHORT *pQFEVersion, /* [in] */ ULONG cchVersionString, /* [out] */ ULONG *pcchVersionString, /* [annotation][out] */ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]); HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )( ICorProfilerInfo12 * This, /* [in] */ ClassID classId, /* [in] */ mdFieldDef fieldToken, /* [in] */ AppDomainID appDomainId, /* [in] */ ThreadID threadId, /* [out] */ void **ppAddress); HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ ULONG32 cAppDomainIds, /* [out] */ ULONG32 *pcAppDomainIds, /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ LPCBYTE *ppBaseLoadAddress, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ], /* [out] */ AssemblyID *pAssemblyId, /* [out] */ DWORD *pdwModuleFlags); HRESULT ( STDMETHODCALLTYPE *EnumThreads )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *RequestReJIT )( ICorProfilerInfo12 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *RequestRevert )( ICorProfilerInfo12 * This, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef 
methodIds[ ], /* [size_is][out] */ HRESULT status[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionID, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cCodeInfos, /* [out] */ ULONG32 *pcCodeInfos, /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )( ICorProfilerInfo12 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *pFunctionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ ULONG cReJitIds, /* [out] */ ULONG *pcReJitIds, /* [length_is][size_is][out] */ ReJITID reJitIds[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [in] */ ReJITID reJitId, /* [in] */ ULONG32 cMap, /* [out] */ ULONG32 *pcMap, /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )( ICorProfilerInfo12 * This, /* [out] */ ICorProfilerFunctionEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )( ICorProfilerInfo12 * This, /* [in] */ ObjectID objectId, /* [out] */ SIZE_T *pcSize); HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )( ICorProfilerInfo12 * This, /* [out] */ DWORD *pdwEventsLow, /* [out] */ DWORD *pdwEventsHigh); HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwEventsLow, /* [in] */ DWORD dwEventsHigh); HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )( ICorProfilerInfo12 * This, /* [in] */ ModuleID inlinersModuleId, /* [in] */ ModuleID inlineeModuleId, /* [in] */ mdMethodDef inlineeMethodId, /* [out] */ BOOL *incompleteData, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId); HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [out] */ DWORD *pCountSymbolBytes); HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )( ICorProfilerInfo12 * This, /* [in] */ ModuleID moduleId, /* [in] */ DWORD symbolsReadOffset, /* [out] */ BYTE *pSymbolBytes, /* [in] */ DWORD countSymbolBytes, /* [out] */ DWORD *pCountSymbolBytesRead); HRESULT ( STDMETHODCALLTYPE *IsFunctionDynamic )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ BOOL *isDynamic); HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP3 )( ICorProfilerInfo12 * This, /* [in] */ LPCBYTE ip, /* [out] */ FunctionID *functionId, /* [out] */ ReJITID *pReJitId); HRESULT ( STDMETHODCALLTYPE *GetDynamicFunctionInfo )( ICorProfilerInfo12 * This, /* [in] */ FunctionID functionId, /* [out] */ ModuleID *moduleId, /* [out] */ PCCOR_SIGNATURE *ppvSig, /* [out] */ ULONG *pbSig, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [out] */ WCHAR wszName[ ]); HRESULT ( STDMETHODCALLTYPE *GetNativeCodeStartAddresses )( ICorProfilerInfo12 * This, FunctionID functionID, ReJITID reJitId, ULONG32 cCodeStartAddresses, ULONG32 *pcCodeStartAddresses, UINT_PTR codeStartAddresses[ ]); HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping3 )( ICorProfilerInfo12 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[ ]); HRESULT ( STDMETHODCALLTYPE *GetCodeInfo4 )( ICorProfilerInfo12 * This, UINT_PTR pNativeCodeStartAddress, ULONG32 cCodeInfos, ULONG32 *pcCodeInfos, COR_PRF_CODE_INFO codeInfos[ ]); HRESULT ( STDMETHODCALLTYPE 
*EnumerateObjectReferences )( ICorProfilerInfo12 * This, ObjectID objectId, ObjectReferenceCallback callback, void *clientData); HRESULT ( STDMETHODCALLTYPE *IsFrozenObject )( ICorProfilerInfo12 * This, ObjectID objectId, BOOL *pbFrozen); HRESULT ( STDMETHODCALLTYPE *GetLOHObjectSizeThreshold )( ICorProfilerInfo12 * This, DWORD *pThreshold); HRESULT ( STDMETHODCALLTYPE *RequestReJITWithInliners )( ICorProfilerInfo12 * This, /* [in] */ DWORD dwRejitFlags, /* [in] */ ULONG cFunctions, /* [size_is][in] */ ModuleID moduleIds[ ], /* [size_is][in] */ mdMethodDef methodIds[ ]); HRESULT ( STDMETHODCALLTYPE *SuspendRuntime )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *ResumeRuntime )( ICorProfilerInfo12 * This); HRESULT ( STDMETHODCALLTYPE *GetEnvironmentVariable )( ICorProfilerInfo12 * This, /* [string][in] */ const WCHAR *szName, /* [in] */ ULONG cchValue, /* [out] */ ULONG *pcchValue, /* [annotation][out] */ _Out_writes_to_(cchValue, *pcchValue) WCHAR szValue[ ]); HRESULT ( STDMETHODCALLTYPE *SetEnvironmentVariable )( ICorProfilerInfo12 * This, /* [string][in] */ const WCHAR *szName, /* [string][in] */ const WCHAR *szValue); HRESULT ( STDMETHODCALLTYPE *EventPipeStartSession )( ICorProfilerInfo12 * This, /* [in] */ UINT32 cProviderConfigs, /* [size_is][in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG pProviderConfigs[ ], /* [in] */ BOOL requestRundown, /* [out] */ EVENTPIPE_SESSION *pSession); HRESULT ( STDMETHODCALLTYPE *EventPipeAddProviderToSession )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_SESSION session, /* [in] */ COR_PRF_EVENTPIPE_PROVIDER_CONFIG providerConfig); HRESULT ( STDMETHODCALLTYPE *EventPipeStopSession )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_SESSION session); HRESULT ( STDMETHODCALLTYPE *EventPipeCreateProvider )( ICorProfilerInfo12 * This, /* [string][in] */ const WCHAR *providerName, /* [out] */ EVENTPIPE_PROVIDER *pProvider); HRESULT ( STDMETHODCALLTYPE *EventPipeGetProviderInfo )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_PROVIDER provider, /* [in] */ ULONG cchName, /* [out] */ ULONG *pcchName, /* [annotation][out] */ _Out_writes_to_(cchName, *pcchName) WCHAR providerName[ ]); HRESULT ( STDMETHODCALLTYPE *EventPipeDefineEvent )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_PROVIDER provider, /* [string][in] */ const WCHAR *eventName, /* [in] */ UINT32 eventID, /* [in] */ UINT64 keywords, /* [in] */ UINT32 eventVersion, /* [in] */ UINT32 level, /* [in] */ UINT8 opcode, /* [in] */ BOOL needStack, /* [in] */ UINT32 cParamDescs, /* [size_is][in] */ COR_PRF_EVENTPIPE_PARAM_DESC pParamDescs[ ], /* [out] */ EVENTPIPE_EVENT *pEvent); HRESULT ( STDMETHODCALLTYPE *EventPipeWriteEvent )( ICorProfilerInfo12 * This, /* [in] */ EVENTPIPE_EVENT event, /* [in] */ UINT32 cData, /* [size_is][in] */ COR_PRF_EVENT_DATA data[ ], /* [in] */ LPCGUID pActivityId, /* [in] */ LPCGUID pRelatedActivityId); END_INTERFACE } ICorProfilerInfo12Vtbl; interface ICorProfilerInfo12 { CONST_VTBL struct ICorProfilerInfo12Vtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerInfo12_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerInfo12_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerInfo12_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerInfo12_GetClassFromObject(This,objectId,pClassId) \ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) ) #define ICorProfilerInfo12_GetClassFromToken(This,moduleId,typeDef,pClassId) \ ( (This)->lpVtbl -> 
GetClassFromToken(This,moduleId,typeDef,pClassId) ) #define ICorProfilerInfo12_GetCodeInfo(This,functionId,pStart,pcSize) \ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) ) #define ICorProfilerInfo12_GetEventMask(This,pdwEvents) \ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) ) #define ICorProfilerInfo12_GetFunctionFromIP(This,ip,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) ) #define ICorProfilerInfo12_GetFunctionFromToken(This,moduleId,token,pFunctionId) \ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) ) #define ICorProfilerInfo12_GetHandleFromThread(This,threadId,phThread) \ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) ) #define ICorProfilerInfo12_GetObjectSize(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) ) #define ICorProfilerInfo12_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) ) #define ICorProfilerInfo12_GetThreadInfo(This,threadId,pdwWin32ThreadId) \ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) ) #define ICorProfilerInfo12_GetCurrentThreadID(This,pThreadId) \ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) ) #define ICorProfilerInfo12_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) ) #define ICorProfilerInfo12_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) ) #define ICorProfilerInfo12_SetEventMask(This,dwEvents) \ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) ) #define ICorProfilerInfo12_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo12_SetFunctionIDMapper(This,pFunc) \ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) ) #define ICorProfilerInfo12_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) ) #define ICorProfilerInfo12_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) ) #define ICorProfilerInfo12_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) ) #define ICorProfilerInfo12_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) ) #define ICorProfilerInfo12_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) ) #define ICorProfilerInfo12_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) ) #define ICorProfilerInfo12_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) ) #define ICorProfilerInfo12_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) ) #define 
ICorProfilerInfo12_SetFunctionReJIT(This,functionId) \ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) ) #define ICorProfilerInfo12_ForceGC(This) \ ( (This)->lpVtbl -> ForceGC(This) ) #define ICorProfilerInfo12_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) ) #define ICorProfilerInfo12_GetInprocInspectionInterface(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) ) #define ICorProfilerInfo12_GetInprocInspectionIThisThread(This,ppicd) \ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) ) #define ICorProfilerInfo12_GetThreadContext(This,threadId,pContextId) \ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) ) #define ICorProfilerInfo12_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) ) #define ICorProfilerInfo12_EndInprocDebugging(This,dwProfilerContext) \ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) ) #define ICorProfilerInfo12_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) ) #define ICorProfilerInfo12_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) ) #define ICorProfilerInfo12_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) ) #define ICorProfilerInfo12_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) ) #define ICorProfilerInfo12_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo12_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) ) #define ICorProfilerInfo12_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) ) #define ICorProfilerInfo12_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo12_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) ) #define ICorProfilerInfo12_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) ) #define ICorProfilerInfo12_EnumModuleFrozenObjects(This,moduleID,ppEnum) \ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) ) #define ICorProfilerInfo12_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \ ( (This)->lpVtbl -> 
GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) ) #define ICorProfilerInfo12_GetBoxClassLayout(This,classId,pBufferOffset) \ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) ) #define ICorProfilerInfo12_GetThreadAppDomain(This,threadId,pAppDomainId) \ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) ) #define ICorProfilerInfo12_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) ) #define ICorProfilerInfo12_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) ) #define ICorProfilerInfo12_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) ) #define ICorProfilerInfo12_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) ) #define ICorProfilerInfo12_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) ) #define ICorProfilerInfo12_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) ) #define ICorProfilerInfo12_GetObjectGeneration(This,objectId,range) \ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) ) #define ICorProfilerInfo12_GetNotifiedExceptionClauseInfo(This,pinfo) \ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) ) #define ICorProfilerInfo12_EnumJITedFunctions(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) ) #define ICorProfilerInfo12_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) ) #define ICorProfilerInfo12_SetFunctionIDMapper2(This,pFunc,clientData) \ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) ) #define ICorProfilerInfo12_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) ) #define ICorProfilerInfo12_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) ) #define ICorProfilerInfo12_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) ) #define ICorProfilerInfo12_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) ) #define ICorProfilerInfo12_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) ) #define ICorProfilerInfo12_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) ) #define ICorProfilerInfo12_EnumModules(This,ppEnum) \ ( (This)->lpVtbl -> EnumModules(This,ppEnum) ) #define 
ICorProfilerInfo12_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) ) #define ICorProfilerInfo12_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) ) #define ICorProfilerInfo12_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) ) #define ICorProfilerInfo12_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) ) #define ICorProfilerInfo12_EnumThreads(This,ppEnum) \ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) ) #define ICorProfilerInfo12_InitializeCurrentThread(This) \ ( (This)->lpVtbl -> InitializeCurrentThread(This) ) #define ICorProfilerInfo12_RequestReJIT(This,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo12_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) ) #define ICorProfilerInfo12_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo12_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) ) #define ICorProfilerInfo12_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) ) #define ICorProfilerInfo12_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) ) #define ICorProfilerInfo12_EnumJITedFunctions2(This,ppEnum) \ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) ) #define ICorProfilerInfo12_GetObjectSize2(This,objectId,pcSize) \ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) ) #define ICorProfilerInfo12_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) ) #define ICorProfilerInfo12_SetEventMask2(This,dwEventsLow,dwEventsHigh) \ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) ) #define ICorProfilerInfo12_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) ) #define ICorProfilerInfo12_ApplyMetaData(This,moduleId) \ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) ) #define ICorProfilerInfo12_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) ) #define ICorProfilerInfo12_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \ ( (This)->lpVtbl -> 
ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) ) #define ICorProfilerInfo12_IsFunctionDynamic(This,functionId,isDynamic) \ ( (This)->lpVtbl -> IsFunctionDynamic(This,functionId,isDynamic) ) #define ICorProfilerInfo12_GetFunctionFromIP3(This,ip,functionId,pReJitId) \ ( (This)->lpVtbl -> GetFunctionFromIP3(This,ip,functionId,pReJitId) ) #define ICorProfilerInfo12_GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) \ ( (This)->lpVtbl -> GetDynamicFunctionInfo(This,functionId,moduleId,ppvSig,pbSig,cchName,pcchName,wszName) ) #define ICorProfilerInfo12_GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) \ ( (This)->lpVtbl -> GetNativeCodeStartAddresses(This,functionID,reJitId,cCodeStartAddresses,pcCodeStartAddresses,codeStartAddresses) ) #define ICorProfilerInfo12_GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) \ ( (This)->lpVtbl -> GetILToNativeMapping3(This,pNativeCodeStartAddress,cMap,pcMap,map) ) #define ICorProfilerInfo12_GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) \ ( (This)->lpVtbl -> GetCodeInfo4(This,pNativeCodeStartAddress,cCodeInfos,pcCodeInfos,codeInfos) ) #define ICorProfilerInfo12_EnumerateObjectReferences(This,objectId,callback,clientData) \ ( (This)->lpVtbl -> EnumerateObjectReferences(This,objectId,callback,clientData) ) #define ICorProfilerInfo12_IsFrozenObject(This,objectId,pbFrozen) \ ( (This)->lpVtbl -> IsFrozenObject(This,objectId,pbFrozen) ) #define ICorProfilerInfo12_GetLOHObjectSizeThreshold(This,pThreshold) \ ( (This)->lpVtbl -> GetLOHObjectSizeThreshold(This,pThreshold) ) #define ICorProfilerInfo12_RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) \ ( (This)->lpVtbl -> RequestReJITWithInliners(This,dwRejitFlags,cFunctions,moduleIds,methodIds) ) #define ICorProfilerInfo12_SuspendRuntime(This) \ ( (This)->lpVtbl -> SuspendRuntime(This) ) #define ICorProfilerInfo12_ResumeRuntime(This) \ ( (This)->lpVtbl -> ResumeRuntime(This) ) #define ICorProfilerInfo12_GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue) \ ( (This)->lpVtbl -> GetEnvironmentVariable(This,szName,cchValue,pcchValue,szValue) ) #define ICorProfilerInfo12_SetEnvironmentVariable(This,szName,szValue) \ ( (This)->lpVtbl -> SetEnvironmentVariable(This,szName,szValue) ) #define ICorProfilerInfo12_EventPipeStartSession(This,cProviderConfigs,pProviderConfigs,requestRundown,pSession) \ ( (This)->lpVtbl -> EventPipeStartSession(This,cProviderConfigs,pProviderConfigs,requestRundown,pSession) ) #define ICorProfilerInfo12_EventPipeAddProviderToSession(This,session,providerConfig) \ ( (This)->lpVtbl -> EventPipeAddProviderToSession(This,session,providerConfig) ) #define ICorProfilerInfo12_EventPipeStopSession(This,session) \ ( (This)->lpVtbl -> EventPipeStopSession(This,session) ) #define ICorProfilerInfo12_EventPipeCreateProvider(This,providerName,pProvider) \ ( (This)->lpVtbl -> EventPipeCreateProvider(This,providerName,pProvider) ) #define ICorProfilerInfo12_EventPipeGetProviderInfo(This,provider,cchName,pcchName,providerName) \ ( (This)->lpVtbl -> EventPipeGetProviderInfo(This,provider,cchName,pcchName,providerName) ) #define ICorProfilerInfo12_EventPipeDefineEvent(This,provider,eventName,eventID,keywords,eventVersion,level,opcode,needStack,cParamDescs,pParamDescs,pEvent) \ ( (This)->lpVtbl -> 
EventPipeDefineEvent(This,provider,eventName,eventID,keywords,eventVersion,level,opcode,needStack,cParamDescs,pParamDescs,pEvent) ) #define ICorProfilerInfo12_EventPipeWriteEvent(This,event,cData,data,pActivityId,pRelatedActivityId) \ ( (This)->lpVtbl -> EventPipeWriteEvent(This,event,cData,data,pActivityId,pRelatedActivityId) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerInfo12_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerMethodEnum_INTERFACE_DEFINED__ #define __ICorProfilerMethodEnum_INTERFACE_DEFINED__ /* interface ICorProfilerMethodEnum */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerMethodEnum; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("FCCEE788-0088-454B-A811-C99F298D1942") ICorProfilerMethodEnum : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0; virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0; virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerMethodEnum **ppEnum) = 0; virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0; virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ COR_PRF_METHOD elements[ ], /* [out] */ ULONG *pceltFetched) = 0; }; #else /* C style interface */ typedef struct ICorProfilerMethodEnumVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerMethodEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerMethodEnum * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerMethodEnum * This); HRESULT ( STDMETHODCALLTYPE *Skip )( ICorProfilerMethodEnum * This, /* [in] */ ULONG celt); HRESULT ( STDMETHODCALLTYPE *Reset )( ICorProfilerMethodEnum * This); HRESULT ( STDMETHODCALLTYPE *Clone )( ICorProfilerMethodEnum * This, /* [out] */ ICorProfilerMethodEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetCount )( ICorProfilerMethodEnum * This, /* [out] */ ULONG *pcelt); HRESULT ( STDMETHODCALLTYPE *Next )( ICorProfilerMethodEnum * This, /* [in] */ ULONG celt, /* [length_is][size_is][out] */ COR_PRF_METHOD elements[ ], /* [out] */ ULONG *pceltFetched); END_INTERFACE } ICorProfilerMethodEnumVtbl; interface ICorProfilerMethodEnum { CONST_VTBL struct ICorProfilerMethodEnumVtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerMethodEnum_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerMethodEnum_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerMethodEnum_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerMethodEnum_Skip(This,celt) \ ( (This)->lpVtbl -> Skip(This,celt) ) #define ICorProfilerMethodEnum_Reset(This) \ ( (This)->lpVtbl -> Reset(This) ) #define ICorProfilerMethodEnum_Clone(This,ppEnum) \ ( (This)->lpVtbl -> Clone(This,ppEnum) ) #define ICorProfilerMethodEnum_GetCount(This,pcelt) \ ( (This)->lpVtbl -> GetCount(This,pcelt) ) #define ICorProfilerMethodEnum_Next(This,celt,elements,pceltFetched) \ ( (This)->lpVtbl -> Next(This,celt,elements,pceltFetched) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerMethodEnum_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerThreadEnum_INTERFACE_DEFINED__ #define __ICorProfilerThreadEnum_INTERFACE_DEFINED__ /* interface ICorProfilerThreadEnum */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerThreadEnum; #if defined(__cplusplus) && !defined(CINTERFACE) 
MIDL_INTERFACE("571194f7-25ed-419f-aa8b-7016b3159701") ICorProfilerThreadEnum : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE Skip( /* [in] */ ULONG celt) = 0; virtual HRESULT STDMETHODCALLTYPE Reset( void) = 0; virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ ICorProfilerThreadEnum **ppEnum) = 0; virtual HRESULT STDMETHODCALLTYPE GetCount( /* [out] */ ULONG *pcelt) = 0; virtual HRESULT STDMETHODCALLTYPE Next( /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ThreadID ids[ ], /* [out] */ ULONG *pceltFetched) = 0; }; #else /* C style interface */ typedef struct ICorProfilerThreadEnumVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerThreadEnum * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerThreadEnum * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerThreadEnum * This); HRESULT ( STDMETHODCALLTYPE *Skip )( ICorProfilerThreadEnum * This, /* [in] */ ULONG celt); HRESULT ( STDMETHODCALLTYPE *Reset )( ICorProfilerThreadEnum * This); HRESULT ( STDMETHODCALLTYPE *Clone )( ICorProfilerThreadEnum * This, /* [out] */ ICorProfilerThreadEnum **ppEnum); HRESULT ( STDMETHODCALLTYPE *GetCount )( ICorProfilerThreadEnum * This, /* [out] */ ULONG *pcelt); HRESULT ( STDMETHODCALLTYPE *Next )( ICorProfilerThreadEnum * This, /* [in] */ ULONG celt, /* [length_is][size_is][out] */ ThreadID ids[ ], /* [out] */ ULONG *pceltFetched); END_INTERFACE } ICorProfilerThreadEnumVtbl; interface ICorProfilerThreadEnum { CONST_VTBL struct ICorProfilerThreadEnumVtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerThreadEnum_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerThreadEnum_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerThreadEnum_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerThreadEnum_Skip(This,celt) \ ( (This)->lpVtbl -> Skip(This,celt) ) #define ICorProfilerThreadEnum_Reset(This) \ ( (This)->lpVtbl -> Reset(This) ) #define ICorProfilerThreadEnum_Clone(This,ppEnum) \ ( (This)->lpVtbl -> Clone(This,ppEnum) ) #define ICorProfilerThreadEnum_GetCount(This,pcelt) \ ( (This)->lpVtbl -> GetCount(This,pcelt) ) #define ICorProfilerThreadEnum_Next(This,celt,ids,pceltFetched) \ ( (This)->lpVtbl -> Next(This,celt,ids,pceltFetched) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerThreadEnum_INTERFACE_DEFINED__ */ #ifndef __ICorProfilerAssemblyReferenceProvider_INTERFACE_DEFINED__ #define __ICorProfilerAssemblyReferenceProvider_INTERFACE_DEFINED__ /* interface ICorProfilerAssemblyReferenceProvider */ /* [local][unique][uuid][object] */ EXTERN_C const IID IID_ICorProfilerAssemblyReferenceProvider; #if defined(__cplusplus) && !defined(CINTERFACE) MIDL_INTERFACE("66A78C24-2EEF-4F65-B45F-DD1D8038BF3C") ICorProfilerAssemblyReferenceProvider : public IUnknown { public: virtual HRESULT STDMETHODCALLTYPE AddAssemblyReference( const COR_PRF_ASSEMBLY_REFERENCE_INFO *pAssemblyRefInfo) = 0; }; #else /* C style interface */ typedef struct ICorProfilerAssemblyReferenceProviderVtbl { BEGIN_INTERFACE HRESULT ( STDMETHODCALLTYPE *QueryInterface )( ICorProfilerAssemblyReferenceProvider * This, /* [in] */ REFIID riid, /* [annotation][iid_is][out] */ _COM_Outptr_ void **ppvObject); ULONG ( STDMETHODCALLTYPE *AddRef )( ICorProfilerAssemblyReferenceProvider * This); ULONG ( STDMETHODCALLTYPE *Release )( ICorProfilerAssemblyReferenceProvider * 
This); HRESULT ( STDMETHODCALLTYPE *AddAssemblyReference )( ICorProfilerAssemblyReferenceProvider * This, const COR_PRF_ASSEMBLY_REFERENCE_INFO *pAssemblyRefInfo); END_INTERFACE } ICorProfilerAssemblyReferenceProviderVtbl; interface ICorProfilerAssemblyReferenceProvider { CONST_VTBL struct ICorProfilerAssemblyReferenceProviderVtbl *lpVtbl; }; #ifdef COBJMACROS #define ICorProfilerAssemblyReferenceProvider_QueryInterface(This,riid,ppvObject) \ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) ) #define ICorProfilerAssemblyReferenceProvider_AddRef(This) \ ( (This)->lpVtbl -> AddRef(This) ) #define ICorProfilerAssemblyReferenceProvider_Release(This) \ ( (This)->lpVtbl -> Release(This) ) #define ICorProfilerAssemblyReferenceProvider_AddAssemblyReference(This,pAssemblyRefInfo) \ ( (This)->lpVtbl -> AddAssemblyReference(This,pAssemblyRefInfo) ) #endif /* COBJMACROS */ #endif /* C style interface */ #endif /* __ICorProfilerAssemblyReferenceProvider_INTERFACE_DEFINED__ */ /* Additional Prototypes for ALL interfaces */ /* end of Additional Prototypes */ #ifdef __cplusplus } #endif #endif
-1
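The COBJMACROS definitions in the header above expand each call into an explicit vtable dispatch, e.g. ICorProfilerInfo12_EnumThreads(This,ppEnum) becomes (This)->lpVtbl -> EnumThreads(This,ppEnum). A minimal sketch of how a profiler written in C might drain the thread enumerator using those macros — the function name, buffer size, and error handling are illustrative assumptions, not part of the header:

#define COBJMACROS
#include "corprof.h"   /* assumed include path for the generated header */

static void DumpThreads(ICorProfilerInfo12 *pInfo)
{
    ICorProfilerThreadEnum *pEnum = NULL;
    if (FAILED(ICorProfilerInfo12_EnumThreads(pInfo, &pEnum)))
        return;

    ThreadID ids[16];
    ULONG fetched = 0;
    /* Standard COM enumerator contract: S_OK while elements remain,
       S_FALSE once fewer than celt are returned. */
    while (SUCCEEDED(ICorProfilerThreadEnum_Next(pEnum, 16, ids, &fetched)) && fetched > 0)
    {
        /* inspect ids[0..fetched-1] */
    }
    ICorProfilerThreadEnum_Release(pEnum);
}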
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/miscellaneous/IsBadWritePtr/test3/test3.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test3.c ** ** Purpose: ** Check that IsBadWritePtr returns non-zero on Read-only memory. ** ** **=========================================================*/ #include <palsuite.h> PALTEST(miscellaneous_IsBadWritePtr_test3_paltest_isbadwriteptr_test3, "miscellaneous/IsBadWritePtr/test3/paltest_isbadwriteptr_test3") { LPVOID PageOne; if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* Commit a single page of read-only memory; IsBadWritePtr should report that it is not writable. */ PageOne = VirtualAlloc(NULL, GetOsPageSize(), MEM_COMMIT, PAGE_READONLY); if(PageOne == NULL) { Fail("ERROR: VirtualAlloc failed to commit the required memory.\n"); } if(IsBadWritePtr(PageOne,GetOsPageSize()) == 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: IsBadWritePtr returned 0 when checking a section of " "read-only memory. It should be non-zero.\n"); } VirtualFree(PageOne,0,MEM_RELEASE); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test3.c ** ** Purpose: ** Check that IsBadWritePtr returns non-zero on Read-only memory. ** ** **=========================================================*/ #include <palsuite.h> PALTEST(miscellaneous_IsBadWritePtr_test3_paltest_isbadwriteptr_test3, "miscellaneous/IsBadWritePtr/test3/paltest_isbadwriteptr_test3") { LPVOID PageOne; if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* Commit a single page of read-only memory; IsBadWritePtr should report that it is not writable. */ PageOne = VirtualAlloc(NULL, GetOsPageSize(), MEM_COMMIT, PAGE_READONLY); if(PageOne == NULL) { Fail("ERROR: VirtualAlloc failed to commit the required memory.\n"); } if(IsBadWritePtr(PageOne,GetOsPageSize()) == 0) { VirtualFree(PageOne,0,MEM_RELEASE); Fail("ERROR: IsBadWritePtr returned 0 when checking a section of " "read-only memory. It should be non-zero.\n"); } VirtualFree(PageOne,0,MEM_RELEASE); PAL_Terminate(); return PASS; }
-1
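The three conditions in the PR description above all concern how ARM32 argument layout interacts with reusing the caller's stack frame. A hedged C sketch of the first condition — the struct size and register assignment follow the ARM AAPCS, but the names and the scenario are illustrative assumptions, not taken from the PR:

/* On ARM32, a large struct argument can be "split": its first bytes are
   passed in core registers and the remainder in the caller's outgoing
   stack area. */
struct Big { int a, b, c, d, e; };       /* 20 bytes */

extern int Callee(int x, struct Big s);  /* x in r0; s split across r1-r3
                                            plus 8 bytes of stack */

int Caller(int x, struct Big s)
{
    /* Under the rules above, the JIT must not fast tail call here: writing
       the stack-resident tail of 's' into the outgoing argument area could
       clobber Caller's own incoming stack bytes before they are read. */
    return Callee(x, s);                 /* emitted as an ordinary call */
}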
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/pal/src/exception/machmessage.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: machmessage.h Abstract: Abstraction over Mach messages used during exception handling. --*/ #include <mach/mach.h> #include <mach/mach_error.h> #include <mach/thread_status.h> using namespace CorUnix; #if HAVE_MACH_EXCEPTIONS #if defined(HOST_AMD64) #define MACH_EH_TYPE(x) mach_##x #else #define MACH_EH_TYPE(x) x #endif // defined(HOST_AMD64) // The vast majority of Mach calls we make in this module are critical: we cannot recover from failures of // these methods (principally because we're handling hardware exceptions in the context of a single dedicated // handler thread). The following macro encapsulates checking the return code from Mach methods and emitting // some useful data and aborting the process on failure. #define CHECK_MACH(_msg, machret) do { \ if (machret != KERN_SUCCESS) \ { \ char _szError[1024]; \ snprintf(_szError, ARRAY_SIZE(_szError), "%s: %u: %s", __FUNCTION__, __LINE__, _msg); \ mach_error(_szError, machret); \ abort(); \ } \ } while (false) // This macro terminates the process with some useful debug info as above, but for the general failure points // that have nothing to do with Mach. #define NONPAL_RETAIL_ASSERT(_msg, ...) do { \ fprintf(stdout, "%s: %u: " _msg "\n", __FUNCTION__, __LINE__, ## __VA_ARGS__); \ fflush(stdout); \ abort(); \ } while (false) #define NONPAL_RETAIL_ASSERTE(_expr) do { \ if (!(_expr)) \ NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \ } while (false) #ifdef _DEBUG #define NONPAL_TRACE_ENABLED EnvironGetenv("NONPAL_TRACING", /* copyValue */ false) #define NONPAL_ASSERT(_msg, ...) NONPAL_RETAIL_ASSERT(_msg, __VA_ARGS__) // Assert macro that doesn't rely on the PAL. #define NONPAL_ASSERTE(_expr) do { \ if (!(_expr)) \ NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \ } while (false) // Debug-only output with printf-style formatting. #define NONPAL_TRACE(_format, ...) do { \ if (NONPAL_TRACE_ENABLED) { fprintf(stdout, "NONPAL_TRACE: " _format, ## __VA_ARGS__); fflush(stdout); } \ } while (false) #else // _DEBUG #define NONPAL_TRACE_ENABLED false #define NONPAL_ASSERT(_msg, ...) #define NONPAL_ASSERTE(_expr) #define NONPAL_TRACE(_format, ...) #endif // _DEBUG class MachMessage; // Contains all the exception and thread state information needed to forward the exception. struct MachExceptionInfo { exception_type_t ExceptionType; mach_msg_type_number_t SubcodeCount; MACH_EH_TYPE(exception_data_type_t) Subcodes[2]; #if defined(HOST_AMD64) x86_thread_state_t ThreadState; x86_float_state_t FloatState; x86_debug_state_t DebugState; #elif defined(HOST_ARM64) arm_thread_state64_t ThreadState; arm_neon_state64_t FloatState; arm_debug_state64_t DebugState; #else #error Unexpected architecture #endif MachExceptionInfo(mach_port_t thread, MachMessage& message); void RestoreState(mach_port_t thread); }; // Abstraction of a subset of Mach message types. Provides accessors that hide the subtle differences in the // message layout of similar message types. class MachMessage { public: // The message types handled by this class. The values are the actual type codes set in the Mach message // header. 
enum MessageType { SET_THREAD_MESSAGE_ID = 1, FORWARD_EXCEPTION_MESSAGE_ID = 2, NOTIFY_SEND_ONCE_MESSAGE_ID = 71, EXCEPTION_RAISE_MESSAGE_ID = 2401, EXCEPTION_RAISE_STATE_MESSAGE_ID = 2402, EXCEPTION_RAISE_STATE_IDENTITY_MESSAGE_ID = 2403, EXCEPTION_RAISE_64_MESSAGE_ID = 2405, EXCEPTION_RAISE_STATE_64_MESSAGE_ID = 2406, EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID = 2407, EXCEPTION_RAISE_REPLY_MESSAGE_ID = 2501, EXCEPTION_RAISE_STATE_REPLY_MESSAGE_ID = 2502, EXCEPTION_RAISE_STATE_IDENTITY_REPLY_MESSAGE_ID = 2503, EXCEPTION_RAISE_REPLY_64_MESSAGE_ID = 2505, EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID = 2506, EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID = 2507 }; // Construct an empty message. Use Receive() to form a message that can be inspected or SendSetThread(), // ForwardNotification() or ReplyToNotification() to construct a message and sent it. MachMessage(); // Listen for the next message on the given port and initialize this class with the contents. The message // type must match one of the MessageTypes indicated above (or the process will be aborted). void Receive(mach_port_t hPort); // Indicate whether a received message belongs to a particular semantic class. bool IsSetThreadRequest(); // Message is a request to set the context of a particular thread bool IsForwardExceptionRequest(); // Message is a request to forward the exception bool IsSendOnceDestroyedNotify(); // Message is a notification that a send-once message was destroyed by the receiver bool IsExceptionNotification(); // Message is a notification of an exception bool IsExceptionReply(); // Message is a reply to the notification of an exception // Get properties of a received message header. MessageType GetMessageType(); // The message type const char *GetMessageTypeName(); // An ASCII representation of the message type for logging purposes mach_port_t GetLocalPort(); // The destination port the message was sent to mach_port_t GetRemotePort(); // The source port the message came from (if a reply is expected) // Get the properties of a set thread request. Fills in the provided context structure with the context // from the message and returns the target thread to which the context should be applied. thread_act_t GetThreadContext(CONTEXT *pContext); // Returns the pal thread instance for the forward exception message CPalThread *GetPalThread(); // Returns the exception info from the forward exception message MachExceptionInfo *GetExceptionInfo(); // Get properties of the type-specific portion of the message. The following properties are supported by // exception notification messages only. thread_act_t GetThread(); // Get the faulting thread exception_type_t GetException(); // Get the exception type (e.g. EXC_BAD_ACCESS) int GetExceptionCodeCount(); // Get the number of exception sub-codes MACH_EH_TYPE(exception_data_type_t) GetExceptionCode(int iIndex); // Get the exception sub-code at the given index // Fetch the thread state flavor from a notification or reply message (return THREAD_STATE_NONE for the // messages that don't contain a thread state). thread_state_flavor_t GetThreadStateFlavor(); // Get the thread state with the given flavor from the exception or exception reply message. If the // message doesn't contain a thread state or the flavor of the state in the message doesn't match, the // state will be fetched directly from the target thread instead (which can be computed implicitly for // exception messages or passed explicitly for reply messages). 
mach_msg_type_number_t GetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, thread_act_t thread = NULL); // Fetch the return code from a reply type message. kern_return_t GetReturnCode(); // Initialize and send a request to set the register context of a particular thread. void SendSetThread(mach_port_t hServerPort, CONTEXT *pContext); // Initialize and send a request to forward the exception message to the notification thread void SendForwardException(mach_port_t hServerPort, MachExceptionInfo *pExceptionInfo, CPalThread *ppalThread); // Initialize the message (overwriting any previous content) to represent a forwarded version of the given // exception notification message and send that message to the chain-back handler previously registered // for the exception type being notified. The new message takes account of the fact that the target // handler may not have requested the same notification behavior or flavor as our handler. void ForwardNotification(MachExceptionHandler *pHandler, MachMessage& message); // Initialize the message (overwriting any previous content) to represent a reply to the given exception // notification and send that reply back to the original sender of the notification. This is used when our // handler handles the exception rather than forwarding it to a chain-back handler. void ReplyToNotification(MachMessage& message, kern_return_t eResult); private: // The maximum size in bytes of any Mach message we can send or receive. Calculating an exact size for // this is non trivial (basically because of the security trailers that Mach appends) but the current // value has proven to be more than enough so far. static const size_t kcbMaxMessageSize = 1500; // The following are structures describing the formats of the Mach messages we understand. // Request to set the register context on a particular thread. 
// SET_THREAD_MESSAGE_ID struct set_thread_request_t { thread_act_t thread; CONTEXT new_context; }; // Request to forward the exception notification // FORWARD_EXCEPTION_MESSAGE_ID struct forward_exception_request_t { thread_act_t thread; CPalThread *ppalThread; MachExceptionInfo exception_info; }; #pragma pack(4) // EXCEPTION_RAISE_MESSAGE_ID struct exception_raise_notification_t { mach_msg_body_t msgh_body; mach_msg_port_descriptor_t thread_port; mach_msg_port_descriptor_t task_port; NDR_record_t ndr; exception_type_t exception; mach_msg_type_number_t code_count; exception_data_type_t code[2]; }; // EXCEPTION_RAISE_REPLY_MESSAGE_ID struct exception_raise_reply_t { NDR_record_t ndr; kern_return_t ret; }; // EXCEPTION_RAISE_64_MESSAGE_ID struct exception_raise_notification_64_t { mach_msg_body_t msgh_body; mach_msg_port_descriptor_t thread_port; mach_msg_port_descriptor_t task_port; NDR_record_t ndr; exception_type_t exception; mach_msg_type_number_t code_count; mach_exception_data_type_t code[2]; }; // EXCEPTION_RAISE_REPLY_64_MESSAGE_ID struct exception_raise_reply_64_t { NDR_record_t ndr; kern_return_t ret; }; // EXCEPTION_RAISE_STATE_MESSAGE_ID struct exception_raise_state_notification_t { NDR_record_t ndr; exception_type_t exception; mach_msg_type_number_t code_count; exception_data_type_t code[2]; thread_state_flavor_t flavor; mach_msg_type_number_t old_state_count; natural_t old_state[THREAD_STATE_MAX]; }; // EXCEPTION_RAISE_STATE_REPLY_MESSAGE_ID struct exception_raise_state_reply_t { NDR_record_t ndr; kern_return_t ret; thread_state_flavor_t flavor; mach_msg_type_number_t new_state_count; natural_t new_state[THREAD_STATE_MAX]; }; // EXCEPTION_RAISE_STATE_64_MESSAGE_ID struct exception_raise_state_notification_64_t { NDR_record_t ndr; exception_type_t exception; mach_msg_type_number_t code_count; mach_exception_data_type_t code[2]; thread_state_flavor_t flavor; mach_msg_type_number_t old_state_count; natural_t old_state[THREAD_STATE_MAX]; }; // EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID struct exception_raise_state_reply_64_t { NDR_record_t ndr; kern_return_t ret; thread_state_flavor_t flavor; mach_msg_type_number_t new_state_count; natural_t new_state[THREAD_STATE_MAX]; }; // EXCEPTION_RAISE_STATE_IDENTITY_MESSAGE_ID struct exception_raise_state_identity_notification_t { mach_msg_body_t msgh_body; mach_msg_port_descriptor_t thread_port; mach_msg_port_descriptor_t task_port; NDR_record_t ndr; exception_type_t exception; mach_msg_type_number_t code_count; exception_data_type_t code[2]; thread_state_flavor_t flavor; mach_msg_type_number_t old_state_count; natural_t old_state[THREAD_STATE_MAX]; }; // EXCEPTION_RAISE_STATE_IDENTITY_REPLY_MESSAGE_ID struct exception_raise_state_identity_reply_t { NDR_record_t ndr; kern_return_t ret; thread_state_flavor_t flavor; mach_msg_type_number_t new_state_count; natural_t new_state[THREAD_STATE_MAX]; }; // EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID struct exception_raise_state_identity_notification_64_t { mach_msg_body_t msgh_body; mach_msg_port_descriptor_t thread_port; mach_msg_port_descriptor_t task_port; NDR_record_t ndr; exception_type_t exception; mach_msg_type_number_t code_count; mach_exception_data_type_t code[2]; thread_state_flavor_t flavor; mach_msg_type_number_t old_state_count; natural_t old_state[THREAD_STATE_MAX]; }; // EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID struct exception_raise_state_identity_reply_64_t { NDR_record_t ndr; kern_return_t ret; thread_state_flavor_t flavor; mach_msg_type_number_t new_state_count; 
natural_t new_state[THREAD_STATE_MAX]; }; #pragma pack() // All the above messages are sent with a standard Mach header prepended. This structure unifies the // message formats. struct mach_message_t { mach_msg_header_t header; union { set_thread_request_t set_thread; forward_exception_request_t forward_exception; exception_raise_notification_t raise; exception_raise_state_notification_t raise_state; exception_raise_state_identity_notification_t raise_state_identity; exception_raise_notification_64_t raise_64; exception_raise_state_notification_64_t raise_state_64; exception_raise_state_identity_notification_64_t raise_state_identity_64; exception_raise_reply_t raise_reply; exception_raise_state_reply_t raise_state_reply; exception_raise_state_identity_reply_t raise_state_identity_reply; exception_raise_reply_64_t raise_reply_64; exception_raise_state_reply_64_t raise_state_reply_64; exception_raise_state_identity_reply_64_t raise_state_identity_reply_64; } data; } __attribute__((packed));; // Re-initializes this data structure (to the same state as default construction, containing no message). void ResetMessage(); // Initialize those fields of a message that are invariant. This method expects that the msgh_id field has // been filled in prior to the call so it can determine which non-header fields to initialize. void InitFixedFields(); // Initialize the size field of the message header (msgh_size) based on the message type and other fields. // This should be called after all other fields have been initialized. void InitMessageSize(); // Do the work of getting ports from the message. // * fCalculate -- calculate the thread port if the message did not contain it. // * fValidate -- failfast if the message was not one expected to have a (calculable) thread port. void GetPorts(bool fCalculate, bool fValidThread); // Given a thread's register context, locate and return the Mach port representing that thread. Only the // x86_THREAD_STATE and x86_THREAD_STATE32 state flavors are supported. thread_act_t GetThreadFromState(thread_state_flavor_t eFlavor, thread_state_t pState); // Transform an exception handler behavior type into the corresponding Mach message ID for the // notification. mach_msg_id_t MapBehaviorToNotificationType(exception_behavior_t eBehavior); // Transform a Mach message ID for an exception notification into the corresponding ID for the reply. mach_msg_id_t MapNotificationToReplyType(mach_msg_id_t eNotificationType); // The following methods initialize fields on the message prior to transmission. Each is valid for either // notification, replies or both. If a particular setter is defined for replies, say, then it will be a // no-op for any replies which don't contain that field. This makes transforming between notifications and // replies of different types simpler (we can copy a super-set of all fields between the two, but only // those operations that make sense will do any work). // Defined for notifications: void SetThread(thread_act_t thread); void SetException(exception_type_t eException); void SetExceptionCodeCount(int cCodes); void SetExceptionCode(int iIndex, MACH_EH_TYPE(exception_data_type_t) iCode); // Defined for replies: void SetReturnCode(kern_return_t eReturnCode); // Defined for both notifications and replies. void SetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, mach_msg_type_number_t count); // Maximally sized buffer for the message to be received into or transmitted out of this class. 
unsigned char m_rgMessageBuffer[kcbMaxMessageSize]; // Initialized by ResetMessage() to point to the buffer above. Gives a typed view of the encapsulated Mach // message. mach_message_t *m_pMessage; // Cached value of GetThread() or MACH_PORT_NULL if that has not been computed yet. thread_act_t m_hThread; // Cached value of the task port or MACH_PORT_NULL if the message doesn't have one. mach_port_t m_hTask; // Considered whether we are responsible for the deallocation of the ports in // this message. It is true for messages we receive, and false for messages we send. bool m_fPortsOwned; }; #endif // HAVE_MACH_EXCEPTIONS
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*++ Module Name: machmessage.h Abstract: Abstraction over Mach messages used during exception handling. --*/ #include <mach/mach.h> #include <mach/mach_error.h> #include <mach/thread_status.h> using namespace CorUnix; #if HAVE_MACH_EXCEPTIONS #if defined(HOST_AMD64) #define MACH_EH_TYPE(x) mach_##x #else #define MACH_EH_TYPE(x) x #endif // defined(HOST_AMD64) // The vast majority of Mach calls we make in this module are critical: we cannot recover from failures of // these methods (principally because we're handling hardware exceptions in the context of a single dedicated // handler thread). The following macro encapsulates checking the return code from Mach methods and emitting // some useful data and aborting the process on failure. #define CHECK_MACH(_msg, machret) do { \ if (machret != KERN_SUCCESS) \ { \ char _szError[1024]; \ snprintf(_szError, ARRAY_SIZE(_szError), "%s: %u: %s", __FUNCTION__, __LINE__, _msg); \ mach_error(_szError, machret); \ abort(); \ } \ } while (false) // This macro terminates the process with some useful debug info as above, but for the general failure points // that have nothing to do with Mach. #define NONPAL_RETAIL_ASSERT(_msg, ...) do { \ fprintf(stdout, "%s: %u: " _msg "\n", __FUNCTION__, __LINE__, ## __VA_ARGS__); \ fflush(stdout); \ abort(); \ } while (false) #define NONPAL_RETAIL_ASSERTE(_expr) do { \ if (!(_expr)) \ NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \ } while (false) #ifdef _DEBUG #define NONPAL_TRACE_ENABLED EnvironGetenv("NONPAL_TRACING", /* copyValue */ false) #define NONPAL_ASSERT(_msg, ...) NONPAL_RETAIL_ASSERT(_msg, __VA_ARGS__) // Assert macro that doesn't rely on the PAL. #define NONPAL_ASSERTE(_expr) do { \ if (!(_expr)) \ NONPAL_RETAIL_ASSERT("ASSERT: %s\n", #_expr); \ } while (false) // Debug-only output with printf-style formatting. #define NONPAL_TRACE(_format, ...) do { \ if (NONPAL_TRACE_ENABLED) { fprintf(stdout, "NONPAL_TRACE: " _format, ## __VA_ARGS__); fflush(stdout); } \ } while (false) #else // _DEBUG #define NONPAL_TRACE_ENABLED false #define NONPAL_ASSERT(_msg, ...) #define NONPAL_ASSERTE(_expr) #define NONPAL_TRACE(_format, ...) #endif // _DEBUG class MachMessage; // Contains all the exception and thread state information needed to forward the exception. struct MachExceptionInfo { exception_type_t ExceptionType; mach_msg_type_number_t SubcodeCount; MACH_EH_TYPE(exception_data_type_t) Subcodes[2]; #if defined(HOST_AMD64) x86_thread_state_t ThreadState; x86_float_state_t FloatState; x86_debug_state_t DebugState; #elif defined(HOST_ARM64) arm_thread_state64_t ThreadState; arm_neon_state64_t FloatState; arm_debug_state64_t DebugState; #else #error Unexpected architecture #endif MachExceptionInfo(mach_port_t thread, MachMessage& message); void RestoreState(mach_port_t thread); }; // Abstraction of a subset of Mach message types. Provides accessors that hide the subtle differences in the // message layout of similar message types. class MachMessage { public: // The message types handled by this class. The values are the actual type codes set in the Mach message // header. 
    enum MessageType
    {
        SET_THREAD_MESSAGE_ID = 1,
        FORWARD_EXCEPTION_MESSAGE_ID = 2,
        NOTIFY_SEND_ONCE_MESSAGE_ID = 71,
        EXCEPTION_RAISE_MESSAGE_ID = 2401,
        EXCEPTION_RAISE_STATE_MESSAGE_ID = 2402,
        EXCEPTION_RAISE_STATE_IDENTITY_MESSAGE_ID = 2403,
        EXCEPTION_RAISE_64_MESSAGE_ID = 2405,
        EXCEPTION_RAISE_STATE_64_MESSAGE_ID = 2406,
        EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID = 2407,
        EXCEPTION_RAISE_REPLY_MESSAGE_ID = 2501,
        EXCEPTION_RAISE_STATE_REPLY_MESSAGE_ID = 2502,
        EXCEPTION_RAISE_STATE_IDENTITY_REPLY_MESSAGE_ID = 2503,
        EXCEPTION_RAISE_REPLY_64_MESSAGE_ID = 2505,
        EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID = 2506,
        EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID = 2507
    };

    // Construct an empty message. Use Receive() to form a message that can be inspected or SendSetThread(),
    // ForwardNotification() or ReplyToNotification() to construct a message and send it.
    MachMessage();

    // Listen for the next message on the given port and initialize this class with the contents. The message
    // type must match one of the MessageTypes indicated above (or the process will be aborted).
    void Receive(mach_port_t hPort);

    // Indicate whether a received message belongs to a particular semantic class.
    bool IsSetThreadRequest();          // Message is a request to set the context of a particular thread
    bool IsForwardExceptionRequest();   // Message is a request to forward the exception
    bool IsSendOnceDestroyedNotify();   // Message is a notification that a send-once message was destroyed by the receiver
    bool IsExceptionNotification();     // Message is a notification of an exception
    bool IsExceptionReply();            // Message is a reply to the notification of an exception

    // Get properties of a received message header.
    MessageType GetMessageType();       // The message type
    const char *GetMessageTypeName();   // An ASCII representation of the message type for logging purposes
    mach_port_t GetLocalPort();         // The destination port the message was sent to
    mach_port_t GetRemotePort();        // The source port the message came from (if a reply is expected)

    // Get the properties of a set thread request. Fills in the provided context structure with the context
    // from the message and returns the target thread to which the context should be applied.
    thread_act_t GetThreadContext(CONTEXT *pContext);

    // Returns the pal thread instance for the forward exception message
    CPalThread *GetPalThread();

    // Returns the exception info from the forward exception message
    MachExceptionInfo *GetExceptionInfo();

    // Get properties of the type-specific portion of the message. The following properties are supported by
    // exception notification messages only.
    thread_act_t GetThread();           // Get the faulting thread
    exception_type_t GetException();    // Get the exception type (e.g. EXC_BAD_ACCESS)
    int GetExceptionCodeCount();        // Get the number of exception sub-codes
    MACH_EH_TYPE(exception_data_type_t) GetExceptionCode(int iIndex); // Get the exception sub-code at the given index

    // Fetch the thread state flavor from a notification or reply message (return THREAD_STATE_NONE for the
    // messages that don't contain a thread state).
    thread_state_flavor_t GetThreadStateFlavor();

    // Get the thread state with the given flavor from the exception or exception reply message. If the
    // message doesn't contain a thread state or the flavor of the state in the message doesn't match, the
    // state will be fetched directly from the target thread instead (which can be computed implicitly for
    // exception messages or passed explicitly for reply messages).
    mach_msg_type_number_t GetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, thread_act_t thread = NULL);

    // Fetch the return code from a reply type message.
    kern_return_t GetReturnCode();

    // Initialize and send a request to set the register context of a particular thread.
    void SendSetThread(mach_port_t hServerPort, CONTEXT *pContext);

    // Initialize and send a request to forward the exception message to the notification thread
    void SendForwardException(mach_port_t hServerPort, MachExceptionInfo *pExceptionInfo, CPalThread *ppalThread);

    // Initialize the message (overwriting any previous content) to represent a forwarded version of the given
    // exception notification message and send that message to the chain-back handler previously registered
    // for the exception type being notified. The new message takes account of the fact that the target
    // handler may not have requested the same notification behavior or flavor as our handler.
    void ForwardNotification(MachExceptionHandler *pHandler, MachMessage& message);

    // Initialize the message (overwriting any previous content) to represent a reply to the given exception
    // notification and send that reply back to the original sender of the notification. This is used when our
    // handler handles the exception rather than forwarding it to a chain-back handler.
    void ReplyToNotification(MachMessage& message, kern_return_t eResult);

private:
    // The maximum size in bytes of any Mach message we can send or receive. Calculating an exact size for
    // this is non trivial (basically because of the security trailers that Mach appends) but the current
    // value has proven to be more than enough so far.
    static const size_t kcbMaxMessageSize = 1500;

    // The following are structures describing the formats of the Mach messages we understand.

    // Request to set the register context on a particular thread.
    // SET_THREAD_MESSAGE_ID
    struct set_thread_request_t
    {
        thread_act_t thread;
        CONTEXT new_context;
    };

    // Request to forward the exception notification
    // FORWARD_EXCEPTION_MESSAGE_ID
    struct forward_exception_request_t
    {
        thread_act_t thread;
        CPalThread *ppalThread;
        MachExceptionInfo exception_info;
    };

#pragma pack(4)

    // EXCEPTION_RAISE_MESSAGE_ID
    struct exception_raise_notification_t
    {
        mach_msg_body_t msgh_body;
        mach_msg_port_descriptor_t thread_port;
        mach_msg_port_descriptor_t task_port;
        NDR_record_t ndr;
        exception_type_t exception;
        mach_msg_type_number_t code_count;
        exception_data_type_t code[2];
    };

    // EXCEPTION_RAISE_REPLY_MESSAGE_ID
    struct exception_raise_reply_t
    {
        NDR_record_t ndr;
        kern_return_t ret;
    };

    // EXCEPTION_RAISE_64_MESSAGE_ID
    struct exception_raise_notification_64_t
    {
        mach_msg_body_t msgh_body;
        mach_msg_port_descriptor_t thread_port;
        mach_msg_port_descriptor_t task_port;
        NDR_record_t ndr;
        exception_type_t exception;
        mach_msg_type_number_t code_count;
        mach_exception_data_type_t code[2];
    };

    // EXCEPTION_RAISE_REPLY_64_MESSAGE_ID
    struct exception_raise_reply_64_t
    {
        NDR_record_t ndr;
        kern_return_t ret;
    };

    // EXCEPTION_RAISE_STATE_MESSAGE_ID
    struct exception_raise_state_notification_t
    {
        NDR_record_t ndr;
        exception_type_t exception;
        mach_msg_type_number_t code_count;
        exception_data_type_t code[2];
        thread_state_flavor_t flavor;
        mach_msg_type_number_t old_state_count;
        natural_t old_state[THREAD_STATE_MAX];
    };

    // EXCEPTION_RAISE_STATE_REPLY_MESSAGE_ID
    struct exception_raise_state_reply_t
    {
        NDR_record_t ndr;
        kern_return_t ret;
        thread_state_flavor_t flavor;
        mach_msg_type_number_t new_state_count;
        natural_t new_state[THREAD_STATE_MAX];
    };

    // EXCEPTION_RAISE_STATE_64_MESSAGE_ID
    struct exception_raise_state_notification_64_t
    {
        NDR_record_t ndr;
        exception_type_t exception;
        mach_msg_type_number_t code_count;
        mach_exception_data_type_t code[2];
        thread_state_flavor_t flavor;
        mach_msg_type_number_t old_state_count;
        natural_t old_state[THREAD_STATE_MAX];
    };

    // EXCEPTION_RAISE_STATE_REPLY_64_MESSAGE_ID
    struct exception_raise_state_reply_64_t
    {
        NDR_record_t ndr;
        kern_return_t ret;
        thread_state_flavor_t flavor;
        mach_msg_type_number_t new_state_count;
        natural_t new_state[THREAD_STATE_MAX];
    };

    // EXCEPTION_RAISE_STATE_IDENTITY_MESSAGE_ID
    struct exception_raise_state_identity_notification_t
    {
        mach_msg_body_t msgh_body;
        mach_msg_port_descriptor_t thread_port;
        mach_msg_port_descriptor_t task_port;
        NDR_record_t ndr;
        exception_type_t exception;
        mach_msg_type_number_t code_count;
        exception_data_type_t code[2];
        thread_state_flavor_t flavor;
        mach_msg_type_number_t old_state_count;
        natural_t old_state[THREAD_STATE_MAX];
    };

    // EXCEPTION_RAISE_STATE_IDENTITY_REPLY_MESSAGE_ID
    struct exception_raise_state_identity_reply_t
    {
        NDR_record_t ndr;
        kern_return_t ret;
        thread_state_flavor_t flavor;
        mach_msg_type_number_t new_state_count;
        natural_t new_state[THREAD_STATE_MAX];
    };

    // EXCEPTION_RAISE_STATE_IDENTITY_64_MESSAGE_ID
    struct exception_raise_state_identity_notification_64_t
    {
        mach_msg_body_t msgh_body;
        mach_msg_port_descriptor_t thread_port;
        mach_msg_port_descriptor_t task_port;
        NDR_record_t ndr;
        exception_type_t exception;
        mach_msg_type_number_t code_count;
        mach_exception_data_type_t code[2];
        thread_state_flavor_t flavor;
        mach_msg_type_number_t old_state_count;
        natural_t old_state[THREAD_STATE_MAX];
    };

    // EXCEPTION_RAISE_STATE_IDENTITY_REPLY_64_MESSAGE_ID
    struct exception_raise_state_identity_reply_64_t
    {
        NDR_record_t ndr;
        kern_return_t ret;
        thread_state_flavor_t flavor;
        mach_msg_type_number_t new_state_count;
        natural_t new_state[THREAD_STATE_MAX];
    };

#pragma pack()

    // All the above messages are sent with a standard Mach header prepended. This structure unifies the
    // message formats.
    struct mach_message_t
    {
        mach_msg_header_t header;
        union
        {
            set_thread_request_t set_thread;
            forward_exception_request_t forward_exception;
            exception_raise_notification_t raise;
            exception_raise_state_notification_t raise_state;
            exception_raise_state_identity_notification_t raise_state_identity;
            exception_raise_notification_64_t raise_64;
            exception_raise_state_notification_64_t raise_state_64;
            exception_raise_state_identity_notification_64_t raise_state_identity_64;
            exception_raise_reply_t raise_reply;
            exception_raise_state_reply_t raise_state_reply;
            exception_raise_state_identity_reply_t raise_state_identity_reply;
            exception_raise_reply_64_t raise_reply_64;
            exception_raise_state_reply_64_t raise_state_reply_64;
            exception_raise_state_identity_reply_64_t raise_state_identity_reply_64;
        } data;
    } __attribute__((packed));

    // Re-initializes this data structure (to the same state as default construction, containing no message).
    void ResetMessage();

    // Initialize those fields of a message that are invariant. This method expects that the msgh_id field has
    // been filled in prior to the call so it can determine which non-header fields to initialize.
    void InitFixedFields();

    // Initialize the size field of the message header (msgh_size) based on the message type and other fields.
    // This should be called after all other fields have been initialized.
    void InitMessageSize();

    // Do the work of getting ports from the message.
    // * fCalculate -- calculate the thread port if the message did not contain it.
    // * fValidThread -- fail fast if the message was not one expected to have a (calculable) thread port.
    void GetPorts(bool fCalculate, bool fValidThread);

    // Given a thread's register context, locate and return the Mach port representing that thread. Only the
    // x86_THREAD_STATE and x86_THREAD_STATE32 state flavors are supported.
    thread_act_t GetThreadFromState(thread_state_flavor_t eFlavor, thread_state_t pState);

    // Transform an exception handler behavior type into the corresponding Mach message ID for the
    // notification.
    mach_msg_id_t MapBehaviorToNotificationType(exception_behavior_t eBehavior);

    // Transform a Mach message ID for an exception notification into the corresponding ID for the reply.
    mach_msg_id_t MapNotificationToReplyType(mach_msg_id_t eNotificationType);

    // The following methods initialize fields on the message prior to transmission. Each is valid for either
    // notifications, replies or both. If a particular setter is defined for replies, say, then it will be a
    // no-op for any replies which don't contain that field. This makes transforming between notifications and
    // replies of different types simpler (we can copy a super-set of all fields between the two, but only
    // those operations that make sense will do any work).

    // Defined for notifications:
    void SetThread(thread_act_t thread);
    void SetException(exception_type_t eException);
    void SetExceptionCodeCount(int cCodes);
    void SetExceptionCode(int iIndex, MACH_EH_TYPE(exception_data_type_t) iCode);

    // Defined for replies:
    void SetReturnCode(kern_return_t eReturnCode);

    // Defined for both notifications and replies.
    void SetThreadState(thread_state_flavor_t eFlavor, thread_state_t pState, mach_msg_type_number_t count);

    // Maximally sized buffer for the message to be received into or transmitted out of this class.
    unsigned char m_rgMessageBuffer[kcbMaxMessageSize];

    // Initialized by ResetMessage() to point to the buffer above. Gives a typed view of the encapsulated Mach
    // message.
    mach_message_t *m_pMessage;

    // Cached value of GetThread() or MACH_PORT_NULL if that has not been computed yet.
    thread_act_t m_hThread;

    // Cached value of the task port or MACH_PORT_NULL if the message doesn't have one.
    mach_port_t m_hTask;

    // Indicates whether we are responsible for the deallocation of the ports in
    // this message. It is true for messages we receive, and false for messages we send.
    bool m_fPortsOwned;
};

#endif // HAVE_MACH_EXCEPTIONS
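For readers coming to this header cold, the raw transport that MachMessage::Receive() wraps is Mach's mach_msg() call. The following is a minimal, self-contained sketch of that receive path, not code from the PAL: the port setup, the fixed 1500-byte buffer (mirroring kcbMaxMessageSize above), the one-second timeout, and the absence of trailer and error handling are all simplifications for illustration.

// Sketch of the raw mach_msg() receive that a MachMessage-style wrapper builds on.
// macOS only; illustrative, with minimal error handling.
#include <mach/mach.h>
#include <cstdio>

int main()
{
    mach_port_t port = MACH_PORT_NULL;

    // Allocate a receive right, analogous to the exception port the PAL listens on.
    if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port) != KERN_SUCCESS)
        return 1;

    // Fixed-size buffer, mirroring m_rgMessageBuffer/kcbMaxMessageSize above.
    alignas(mach_msg_header_t) unsigned char buffer[1500];
    mach_msg_header_t *header = reinterpret_cast<mach_msg_header_t *>(buffer);

    // Wait up to one second for a message; with no sender this times out.
    kern_return_t kr = mach_msg(header, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
                                sizeof(buffer), port, 1000 /* ms */, MACH_PORT_NULL);
    if (kr == KERN_SUCCESS)
        printf("received message id %d, %u bytes\n", header->msgh_id, header->msgh_size);
    else if (kr == MACH_RCV_TIMED_OUT)
        printf("no message arrived\n");

    // Drop the receive right we allocated.
    mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
    return 0;
}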
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/c_runtime/_wfopen/test5/test5.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test5.c ** ** Purpose: Tests the PAL implementation of the _wfopen function. ** Test to ensure that you can write to an 'r+' mode file, ** and that you can read from an 'r+' mode file. ** ** Depends: ** fprintf ** fclose ** fgets ** fseek ** ** **===================================================================*/ #define UNICODE #include <palsuite.h> PALTEST(c_runtime__wfopen_test5_paltest_wfopen_test5, "c_runtime/_wfopen/test5/paltest_wfopen_test5") { FILE *fp; char buffer[128]; WCHAR filename[] = {'t','e','s','t','f','i','l','e','\0'}; WCHAR write[] = {'w','\0'}; WCHAR readplus[] = {'r','+','\0'}; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Open a file with 'w' mode */ if( (fp = _wfopen( filename,write )) == NULL ) { Fail( "ERROR: The file failed to open with 'w' mode.\n" ); } if(fclose(fp)) { Fail("ERROR: Attempted to close a file, but fclose failed. " "This test depends upon it."); } if( (fp = _wfopen( filename, readplus )) == NULL ) { Fail( "ERROR: The file failed to open with 'r+' mode.\n" ); } /* Write some text to the file */ if(fprintf(fp,"%s","some text") <= 0) { Fail("ERROR: Attempted to WRITE to a file opened with 'r+' mode " "but fprintf failed. Either fopen or fprintf have problems."); } if(fseek(fp, 0, SEEK_SET)) { Fail("ERROR: fseek failed, and this test depends on it."); } /* Attempt to read from the 'r+' mode file; this should pass */ if(fgets(buffer,10,fp) == NULL) { Fail("ERROR: Tried to READ from a file with 'r+' mode set. " "This should succeed, but fgets returned NULL. Either fgets " "or fopen is broken."); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test5.c ** ** Purpose: Tests the PAL implementation of the _wfopen function. ** Test to ensure that you can write to an 'r+' mode file, ** and that you can read from an 'r+' mode file. ** ** Depends: ** fprintf ** fclose ** fgets ** fseek ** ** **===================================================================*/ #define UNICODE #include <palsuite.h> PALTEST(c_runtime__wfopen_test5_paltest_wfopen_test5, "c_runtime/_wfopen/test5/paltest_wfopen_test5") { FILE *fp; char buffer[128]; WCHAR filename[] = {'t','e','s','t','f','i','l','e','\0'}; WCHAR write[] = {'w','\0'}; WCHAR readplus[] = {'r','+','\0'}; if (PAL_Initialize(argc, argv)) { return FAIL; } /* Open a file with 'w' mode */ if( (fp = _wfopen( filename,write )) == NULL ) { Fail( "ERROR: The file failed to open with 'w' mode.\n" ); } if(fclose(fp)) { Fail("ERROR: Attempted to close a file, but fclose failed. " "This test depends upon it."); } if( (fp = _wfopen( filename, readplus )) == NULL ) { Fail( "ERROR: The file failed to open with 'r+' mode.\n" ); } /* Write some text to the file */ if(fprintf(fp,"%s","some text") <= 0) { Fail("ERROR: Attempted to WRITE to a file opened with 'r+' mode " "but fprintf failed. Either fopen or fprintf have problems."); } if(fseek(fp, 0, SEEK_SET)) { Fail("ERROR: fseek failed, and this test depends on it."); } /* Attempt to read from the 'r+' mode file; this should pass */ if(fgets(buffer,10,fp) == NULL) { Fail("ERROR: Tried to READ from a file with 'r+' mode set. " "This should succeed, but fgets returned NULL. Either fgets " "or fopen is broken."); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/native/libs/System.Native/pal_errno.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include <pal_error_common.h> /** * Converts the given raw numeric value obtained via errno -> * GetLastWin32Error() to a standard numeric value defined by enum * Error above. If the value is not recognized, returns * Error_ENONSTANDARD. */ PALEXPORT int32_t SystemNative_ConvertErrorPlatformToPal(int32_t platformErrno); /** * Converts the given PAL Error value to a platform-specific errno * value. This is to be used when we want to synthesize a given error * and obtain the appropriate error message via StrErrorR. */ PALEXPORT int32_t SystemNative_ConvertErrorPalToPlatform(int32_t error); /** * Obtains the system error message for the given raw numeric value * obtained by errno / Marshal.GetLastWin32Error(). * * By design, this does not take a PAL errno, but a raw system errno, * so that: * * 1. We don't waste cycles converting back and forth (generally, if * we have a PAL errno, we had a platform errno just a few * instructions ago.) * * 2. We don't lose the ability to get the system error message for * non-standard, platform-specific errors. * * Note that buffer may or may not be used and the error message is * passed back via the return value. * * If the buffer was too small to fit the full message, null is * returned and the buffer is filled with as much of the message * as possible and null-terminated. */ PALEXPORT const char* SystemNative_StrErrorR(int32_t platformErrno, char* buffer, int32_t bufferSize); /** * Gets the current errno value */ PALEXPORT int32_t SystemNative_GetErrNo(void); /** * Sets the errno value */ PALEXPORT void SystemNative_SetErrNo(int32_t errorCode);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include <pal_error_common.h> /** * Converts the given raw numeric value obtained via errno -> * GetLastWin32Error() to a standard numeric value defined by enum * Error above. If the value is not recognized, returns * Error_ENONSTANDARD. */ PALEXPORT int32_t SystemNative_ConvertErrorPlatformToPal(int32_t platformErrno); /** * Converts the given PAL Error value to a platform-specific errno * value. This is to be used when we want to synthesize a given error * and obtain the appropriate error message via StrErrorR. */ PALEXPORT int32_t SystemNative_ConvertErrorPalToPlatform(int32_t error); /** * Obtains the system error message for the given raw numeric value * obtained by errno / Marshal.GetLastWin32Error(). * * By design, this does not take a PAL errno, but a raw system errno, * so that: * * 1. We don't waste cycles converting back and forth (generally, if * we have a PAL errno, we had a platform errno just a few * instructions ago.) * * 2. We don't lose the ability to get the system error message for * non-standard, platform-specific errors. * * Note that buffer may or may not be used and the error message is * passed back via the return value. * * If the buffer was too small to fit the full message, null is * returned and the buffer is filled with as much of the message * as possible and null-terminated. */ PALEXPORT const char* SystemNative_StrErrorR(int32_t platformErrno, char* buffer, int32_t bufferSize); /** * Gets the current errno value */ PALEXPORT int32_t SystemNative_GetErrNo(void); /** * Sets the errno value */ PALEXPORT void SystemNative_SetErrNo(int32_t errorCode);
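The StrErrorR contract above (message returned by value, caller buffer used only as a fallback, NULL signalling truncation) is easy to get backwards. Below is a small consumer sketch, assuming the export above is available to link against; the buffer size is arbitrary and the re-declaration simply mirrors the header.

// Sketch of consuming SystemNative_StrErrorR per the contract documented above.
#include <stdint.h>
#include <stdio.h>

extern "C" const char* SystemNative_StrErrorR(int32_t platformErrno, char* buffer, int32_t bufferSize);

void PrintPlatformError(int32_t platformErrno)
{
    char buffer[128]; // arbitrary size for illustration
    const char* message = SystemNative_StrErrorR(platformErrno, buffer, sizeof(buffer));
    if (message == NULL)
        message = buffer; // NULL means a truncated, null-terminated copy was written here
    printf("errno %d: %s\n", platformErrno, message);
}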
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/threading/WaitForMultipleObjectsEx/test3/test3.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test3.c ** ** Purpose: Tests that waiting on an open mutex will return ** WAIT_OBJECT_0. Does this by creating a child thread that ** acquires the mutex, releases it, and exits. ** ** **===================================================================*/ #include <palsuite.h> const int ChildThreadWaitTime = 1000; const int ParentDelayTime = 2000; DWORD PALAPI AcquiringProc(LPVOID lpParameter); PALTEST(threading_WaitForMultipleObjectsEx_test3_paltest_waitformultipleobjectsex_test3, "threading/WaitForMultipleObjectsEx/test3/paltest_waitformultipleobjectsex_test3") { HANDLE Mutex; HANDLE hThread = 0; DWORD dwThreadId = 0; int ret; if (0 != (PAL_Initialize(argc, argv))) { return FAIL; } Mutex = CreateMutexW(NULL, FALSE, NULL); if (Mutex == NULL) { Fail("Unable to create the mutex. GetLastError returned %d\n", GetLastError()); } hThread = CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE)AcquiringProc, (LPVOID) Mutex, 0, &dwThreadId); if (hThread == NULL) { Fail("ERROR: Was not able to create the thread to test!\n" "GetLastError returned %d\n", GetLastError()); } Sleep(ParentDelayTime); ret = WaitForMultipleObjectsEx(1, &Mutex, FALSE, INFINITE, FALSE); if (ret != WAIT_OBJECT_0) { Fail("Expected WaitForMultipleObjectsEx to return WAIT_OBJECT_0\n" "Got %d\n", ret); } if (!CloseHandle(Mutex)) { Fail("CloseHandle on the mutex failed!\n"); } if (!CloseHandle(hThread)) { Fail("CloseHandle on the thread failed!\n"); } PAL_Terminate(); return PASS; } /* * Entry Point for child thread. Acquires a mutex, releases it, and exits. */ DWORD PALAPI AcquiringProc(LPVOID lpParameter) { HANDLE Mutex; DWORD ret; Mutex = (HANDLE) lpParameter; Sleep(ChildThreadWaitTime); ret = WaitForSingleObject(Mutex, 0); if (ret != WAIT_OBJECT_0) { Fail("Expected the WaitForSingleObject call on the mutex to succeed\n" "Expected return of WAIT_OBJECT_0, got %d\n", ret); } ret = ReleaseMutex(Mutex); if (!ret) { Fail("Unable to release mutex! GetLastError returned %d\n", GetLastError()); } return 0; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test3.c ** ** Purpose: Tests that waiting on an open mutex will return ** WAIT_OBJECT_0. Does this by creating a child thread that ** acquires the mutex, releases it, and exits. ** ** **===================================================================*/ #include <palsuite.h> const int ChildThreadWaitTime = 1000; const int ParentDelayTime = 2000; DWORD PALAPI AcquiringProc(LPVOID lpParameter); PALTEST(threading_WaitForMultipleObjectsEx_test3_paltest_waitformultipleobjectsex_test3, "threading/WaitForMultipleObjectsEx/test3/paltest_waitformultipleobjectsex_test3") { HANDLE Mutex; HANDLE hThread = 0; DWORD dwThreadId = 0; int ret; if (0 != (PAL_Initialize(argc, argv))) { return FAIL; } Mutex = CreateMutexW(NULL, FALSE, NULL); if (Mutex == NULL) { Fail("Unable to create the mutex. GetLastError returned %d\n", GetLastError()); } hThread = CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE)AcquiringProc, (LPVOID) Mutex, 0, &dwThreadId); if (hThread == NULL) { Fail("ERROR: Was not able to create the thread to test!\n" "GetLastError returned %d\n", GetLastError()); } Sleep(ParentDelayTime); ret = WaitForMultipleObjectsEx(1, &Mutex, FALSE, INFINITE, FALSE); if (ret != WAIT_OBJECT_0) { Fail("Expected WaitForMultipleObjectsEx to return WAIT_OBJECT_0\n" "Got %d\n", ret); } if (!CloseHandle(Mutex)) { Fail("CloseHandle on the mutex failed!\n"); } if (!CloseHandle(hThread)) { Fail("CloseHandle on the thread failed!\n"); } PAL_Terminate(); return PASS; } /* * Entry Point for child thread. Acquires a mutex, releases it, and exits. */ DWORD PALAPI AcquiringProc(LPVOID lpParameter) { HANDLE Mutex; DWORD ret; Mutex = (HANDLE) lpParameter; Sleep(ChildThreadWaitTime); ret = WaitForSingleObject(Mutex, 0); if (ret != WAIT_OBJECT_0) { Fail("Expected the WaitForSingleObject call on the mutex to succeed\n" "Expected return of WAIT_OBJECT_0, got %d\n", ret); } ret = ReleaseMutex(Mutex); if (!ret) { Fail("Unable to release mutex! GetLastError returned %d\n", GetLastError()); } return 0; }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/nativeaot/Runtime/profheapwalkhelper.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _GCHEAPWALKHELPER_H_ #define _GCHEAPWALKHELPER_H_ // These two functions are utilized to scan the heap if requested by ETW // or a profiler. The implementations of these two functions are in profheapwalkhelper.cpp. #if defined(FEATURE_EVENT_TRACE) || defined(GC_PROFILING) void ScanRootsHelper(Object* pObj, Object** ppRoot, ScanContext* pSC, DWORD dwFlags); bool HeapWalkHelper(Object* pBO, void* pvContext); #endif #endif // _GCHEAPWALKHELPER_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _GCHEAPWALKHELPER_H_ #define _GCHEAPWALKHELPER_H_ // These two functions are utilized to scan the heap if requested by ETW // or a profiler. The implementations of these two functions are in profheapwalkhelper.cpp. #if defined(FEATURE_EVENT_TRACE) || defined(GC_PROFILING) void ScanRootsHelper(Object* pObj, Object** ppRoot, ScanContext* pSC, DWORD dwFlags); bool HeapWalkHelper(Object* pBO, void* pvContext); #endif #endif // _GCHEAPWALKHELPER_H_
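HeapWalkHelper above is a callback invoked per object during a profiler/ETW heap walk. As an illustration of the shape only: the callback below is hypothetical and not part of the runtime, Object is left opaque, and treating a true return as "continue walking" is an assumption this header does not document.

// Hypothetical callback matching the bool (Object*, void*) shape declared above;
// it just counts objects via a caller-supplied context.
#include <cstddef>
#include <cstdio>

class Object; // opaque stand-in for the runtime's object representation

struct WalkStats
{
    size_t objectCount;
};

static bool CountingWalkCallback(Object* pBO, void* pvContext)
{
    WalkStats* stats = static_cast<WalkStats*>(pvContext);
    if (pBO != nullptr)
        stats->objectCount++;
    return true; // assumed to mean "keep walking"
}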
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/c_runtime/wcstoul/test6/test6.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test6.c ** ** Purpose: Test #6 for the wcstoul function. Tests strings with octal/hex ** number specifiers ** ** **==========================================================================*/ #include <palsuite.h> /* * Notes: wcstoul should depend on the current locale's LC_NUMERIC category, * this is not currently tested. */ PALTEST(c_runtime_wcstoul_test6_paltest_wcstoul_test6, "c_runtime/wcstoul/test6/paltest_wcstoul_test6") { WCHAR test1[] = {'0','x','1','2', 0}; WCHAR test2[] = {'0','1','2',0}; WCHAR *end; ULONG l; if (0 != PAL_Initialize(argc, argv)) { return FAIL; } l = wcstoul(test1, &end, 16); if (l != 0x12) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 0x12, l); } if (end != test1 + 4) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test1 + 4, end); } l = wcstoul(test1, &end, 10); if (l != 0) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 0, l); } if (end != test1+1) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test1+1, end); } l = wcstoul(test2, &end, 8); if (l != 10) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 10, l); } if (end != test2 + 3) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test2 + 3, end); } l = wcstoul(test2, &end, 10); if (l != 12) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 12, l); } if (end != test2 + 3) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test2 + 3, end); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test6.c ** ** Purpose: Test #6 for the wcstoul function. Tests strings with octal/hex ** number specifiers ** ** **==========================================================================*/ #include <palsuite.h> /* * Notes: wcstoul should depend on the current locale's LC_NUMERIC category, * this is not currently tested. */ PALTEST(c_runtime_wcstoul_test6_paltest_wcstoul_test6, "c_runtime/wcstoul/test6/paltest_wcstoul_test6") { WCHAR test1[] = {'0','x','1','2', 0}; WCHAR test2[] = {'0','1','2',0}; WCHAR *end; ULONG l; if (0 != PAL_Initialize(argc, argv)) { return FAIL; } l = wcstoul(test1, &end, 16); if (l != 0x12) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 0x12, l); } if (end != test1 + 4) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test1 + 4, end); } l = wcstoul(test1, &end, 10); if (l != 0) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 0, l); } if (end != test1+1) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test1+1, end); } l = wcstoul(test2, &end, 8); if (l != 10) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 10, l); } if (end != test2 + 3) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test2 + 3, end); } l = wcstoul(test2, &end, 10); if (l != 12) { Fail("ERROR: Expected wcstoul to return %u, got %u\n", 12, l); } if (end != test2 + 3) { Fail("ERROR: Expected wcstoul to give an end value of %p, got %p\n", test2 + 3, end); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/pal/inc/rt/eh.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "palrt.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "palrt.h"
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/native/libs/System.Security.Cryptography.Native.Android/pal_evp.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_jni.h" #define EVP_MAX_MD_SIZE 64 PALEXPORT int32_t CryptoNative_EvpMdSize(intptr_t md); PALEXPORT int32_t CryptoNative_GetMaxMdSize(void); PALEXPORT intptr_t CryptoNative_EvpMd5(void); PALEXPORT intptr_t CryptoNative_EvpSha1(void); PALEXPORT intptr_t CryptoNative_EvpSha256(void); PALEXPORT intptr_t CryptoNative_EvpSha384(void); PALEXPORT intptr_t CryptoNative_EvpSha512(void); PALEXPORT int32_t CryptoNative_EvpDigestOneShot(intptr_t type, void* source, int32_t sourceSize, uint8_t* md, uint32_t* mdSize); PALEXPORT jobject CryptoNative_EvpMdCtxCreate(intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestReset(jobject ctx, intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestUpdate(jobject ctx, void* d, int32_t cnt); PALEXPORT int32_t CryptoNative_EvpDigestFinalEx(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT int32_t CryptoNative_EvpDigestCurrent(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT void CryptoNative_EvpMdCtxDestroy(jobject ctx);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_jni.h" #define EVP_MAX_MD_SIZE 64 PALEXPORT int32_t CryptoNative_EvpMdSize(intptr_t md); PALEXPORT int32_t CryptoNative_GetMaxMdSize(void); PALEXPORT intptr_t CryptoNative_EvpMd5(void); PALEXPORT intptr_t CryptoNative_EvpSha1(void); PALEXPORT intptr_t CryptoNative_EvpSha256(void); PALEXPORT intptr_t CryptoNative_EvpSha384(void); PALEXPORT intptr_t CryptoNative_EvpSha512(void); PALEXPORT int32_t CryptoNative_EvpDigestOneShot(intptr_t type, void* source, int32_t sourceSize, uint8_t* md, uint32_t* mdSize); PALEXPORT jobject CryptoNative_EvpMdCtxCreate(intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestReset(jobject ctx, intptr_t type); PALEXPORT int32_t CryptoNative_EvpDigestUpdate(jobject ctx, void* d, int32_t cnt); PALEXPORT int32_t CryptoNative_EvpDigestFinalEx(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT int32_t CryptoNative_EvpDigestCurrent(jobject ctx, uint8_t* md, uint32_t* s); PALEXPORT void CryptoNative_EvpMdCtxDestroy(jobject ctx);
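The declarations above describe a conventional create/update/final digest lifecycle over a JNI-backed context handle. The following is a sketch of that flow only, assuming this header's exports are linked in and JNI is already initialized; error handling is deliberately minimal.

// Sketch of the multi-step digest flow declared above, hashing a short string
// with SHA-256. jobject is a JNI handle owned by the shim.
#include "pal_evp.h" // assumed reachable on the include path
#include <stdint.h>
#include <string.h>

void Sha256Example(uint8_t md[EVP_MAX_MD_SIZE], uint32_t* mdSize)
{
    const char* data = "hello";
    jobject ctx = CryptoNative_EvpMdCtxCreate(CryptoNative_EvpSha256());
    if (ctx == NULL)
        return; // context creation failed
    CryptoNative_EvpDigestUpdate(ctx, (void*)data, (int32_t)strlen(data));
    CryptoNative_EvpDigestFinalEx(ctx, md, mdSize);
    CryptoNative_EvpMdCtxDestroy(ctx);
}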
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/threading/ExitThread/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test1.c ** ** Purpose: Test for ExitThread. Create a thread and then call ** exit thread within the threading function. Ensure that it exits ** immediately. ** ** **=========================================================*/ #include <palsuite.h> DWORD dwExitThreadTestParameter = 0; DWORD PALAPI ExitThreadTestThread( LPVOID lpParameter) { DWORD dwRet = 0; /* Save parameter for test */ dwExitThreadTestParameter = (DWORD)(SIZE_T)lpParameter; /* Call the ExitThread function */ ExitThread(dwRet); /* If we didn't exit, get caught in this loop. But, the program will exit. */ while (!dwRet) { Fail("ERROR: Entered an infinite loop because ExitThread " "failed to exit from the thread. Forcing exit from " "the test now."); } return dwRet; } BOOL ExitThreadTest() { BOOL bRet = FALSE; DWORD dwRet = 0; LPSECURITY_ATTRIBUTES lpThreadAttributes = NULL; DWORD dwStackSize = 0; LPTHREAD_START_ROUTINE lpStartAddress = &ExitThreadTestThread; LPVOID lpParameter = (LPVOID)lpStartAddress; DWORD dwCreationFlags = 0; //run immediately DWORD dwThreadId = 0; HANDLE hThread = 0; dwExitThreadTestParameter = 0; /* Create a Thread. We'll need this to test that we're able to exit the thread. */ hThread = CreateThread( lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter, dwCreationFlags, &dwThreadId ); if (hThread != INVALID_HANDLE_VALUE) { dwRet = WaitForSingleObject(hThread,INFINITE); if (dwRet != WAIT_OBJECT_0) { Trace("ExitThreadTest:WaitForSingleObject failed " "(%x)\n",GetLastError()); } else { /* Check to ensure that the parameter set in the Thread function is correct. */ if (dwExitThreadTestParameter != (DWORD)(SIZE_T)lpParameter) { Trace("ERROR: The parameter passed should have been " "%d but turned up as %d.", dwExitThreadTestParameter, lpParameter); } else { bRet = TRUE; } } } else { Trace("ExitThreadTest:CreateThread failed (%x)\n",GetLastError()); } return bRet; } PALTEST(threading_ExitThread_test1_paltest_exitthread_test1, "threading/ExitThread/test1/paltest_exitthread_test1") { if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } if(!ExitThreadTest()) { Fail ("Test failed\n"); } PAL_Terminate(); return ( PASS ); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test1.c ** ** Purpose: Test for ExitThread. Create a thread and then call ** exit thread within the threading function. Ensure that it exits ** immediately. ** ** **=========================================================*/ #include <palsuite.h> DWORD dwExitThreadTestParameter = 0; DWORD PALAPI ExitThreadTestThread( LPVOID lpParameter) { DWORD dwRet = 0; /* Save parameter for test */ dwExitThreadTestParameter = (DWORD)(SIZE_T)lpParameter; /* Call the ExitThread function */ ExitThread(dwRet); /* If we didn't exit, get caught in this loop. But, the program will exit. */ while (!dwRet) { Fail("ERROR: Entered an infinite loop because ExitThread " "failed to exit from the thread. Forcing exit from " "the test now."); } return dwRet; } BOOL ExitThreadTest() { BOOL bRet = FALSE; DWORD dwRet = 0; LPSECURITY_ATTRIBUTES lpThreadAttributes = NULL; DWORD dwStackSize = 0; LPTHREAD_START_ROUTINE lpStartAddress = &ExitThreadTestThread; LPVOID lpParameter = (LPVOID)lpStartAddress; DWORD dwCreationFlags = 0; //run immediately DWORD dwThreadId = 0; HANDLE hThread = 0; dwExitThreadTestParameter = 0; /* Create a Thread. We'll need this to test that we're able to exit the thread. */ hThread = CreateThread( lpThreadAttributes, dwStackSize, lpStartAddress, lpParameter, dwCreationFlags, &dwThreadId ); if (hThread != INVALID_HANDLE_VALUE) { dwRet = WaitForSingleObject(hThread,INFINITE); if (dwRet != WAIT_OBJECT_0) { Trace("ExitThreadTest:WaitForSingleObject failed " "(%x)\n",GetLastError()); } else { /* Check to ensure that the parameter set in the Thread function is correct. */ if (dwExitThreadTestParameter != (DWORD)(SIZE_T)lpParameter) { Trace("ERROR: The parameter passed should have been " "%d but turned up as %d.", dwExitThreadTestParameter, lpParameter); } else { bRet = TRUE; } } } else { Trace("ExitThreadTest:CreateThread failed (%x)\n",GetLastError()); } return bRet; } PALTEST(threading_ExitThread_test1_paltest_exitthread_test1, "threading/ExitThread/test1/paltest_exitthread_test1") { if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } if(!ExitThreadTest()) { Fail ("Test failed\n"); } PAL_Terminate(); return ( PASS ); }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/native/libs/System.Globalization.Native/pal_localeStringData.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #pragma once #include "pal_locale.h" #include "pal_compiler.h" // Enum that corresponds to managed enum CultureData.LocaleStringData. // The numeric values of the enum members match their Win32 counterparts. typedef enum { LocaleString_LocalizedDisplayName = 0x02, LocaleString_EnglishDisplayName = 0x00000072, LocaleString_NativeDisplayName = 0x00000073, LocaleString_LocalizedLanguageName = 0x0000006f, LocaleString_EnglishLanguageName = 0x00001001, LocaleString_NativeLanguageName = 0x04, LocaleString_EnglishCountryName = 0x00001002, LocaleString_NativeCountryName = 0x08, LocaleString_DecimalSeparator = 0x0E, LocaleString_ThousandSeparator = 0x0F, LocaleString_Digits = 0x00000013, LocaleString_MonetarySymbol = 0x00000014, LocaleString_CurrencyEnglishName = 0x00001007, LocaleString_CurrencyNativeName = 0x00001008, LocaleString_Iso4217MonetarySymbol = 0x00000015, LocaleString_MonetaryDecimalSeparator = 0x00000016, LocaleString_MonetaryThousandSeparator = 0x00000017, LocaleString_AMDesignator = 0x00000028, LocaleString_PMDesignator = 0x00000029, LocaleString_PositiveSign = 0x00000050, LocaleString_NegativeSign = 0x00000051, LocaleString_Iso639LanguageTwoLetterName = 0x00000059, LocaleString_Iso639LanguageThreeLetterName = 0x00000067, LocaleString_Iso3166CountryName = 0x0000005A, LocaleString_Iso3166CountryName2= 0x00000068, LocaleString_NaNSymbol = 0x00000069, LocaleString_PositiveInfinitySymbol = 0x0000006a, LocaleString_ParentName = 0x0000006d, LocaleString_PercentSymbol = 0x00000076, LocaleString_PerMilleSymbol = 0x00000077 } LocaleStringData; PALEXPORT int32_t GlobalizationNative_GetLocaleInfoString(const UChar* localeName, LocaleStringData localeStringData, UChar* value, int32_t valueLength, const UChar* uiLocaleName); PALEXPORT int32_t GlobalizationNative_GetLocaleTimeFormat(const UChar* localeName, int shortFormat, UChar* value, int32_t valueLength);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #pragma once #include "pal_locale.h" #include "pal_compiler.h" // Enum that corresponds to managed enum CultureData.LocaleStringData. // The numeric values of the enum members match their Win32 counterparts. typedef enum { LocaleString_LocalizedDisplayName = 0x02, LocaleString_EnglishDisplayName = 0x00000072, LocaleString_NativeDisplayName = 0x00000073, LocaleString_LocalizedLanguageName = 0x0000006f, LocaleString_EnglishLanguageName = 0x00001001, LocaleString_NativeLanguageName = 0x04, LocaleString_EnglishCountryName = 0x00001002, LocaleString_NativeCountryName = 0x08, LocaleString_DecimalSeparator = 0x0E, LocaleString_ThousandSeparator = 0x0F, LocaleString_Digits = 0x00000013, LocaleString_MonetarySymbol = 0x00000014, LocaleString_CurrencyEnglishName = 0x00001007, LocaleString_CurrencyNativeName = 0x00001008, LocaleString_Iso4217MonetarySymbol = 0x00000015, LocaleString_MonetaryDecimalSeparator = 0x00000016, LocaleString_MonetaryThousandSeparator = 0x00000017, LocaleString_AMDesignator = 0x00000028, LocaleString_PMDesignator = 0x00000029, LocaleString_PositiveSign = 0x00000050, LocaleString_NegativeSign = 0x00000051, LocaleString_Iso639LanguageTwoLetterName = 0x00000059, LocaleString_Iso639LanguageThreeLetterName = 0x00000067, LocaleString_Iso3166CountryName = 0x0000005A, LocaleString_Iso3166CountryName2= 0x00000068, LocaleString_NaNSymbol = 0x00000069, LocaleString_PositiveInfinitySymbol = 0x0000006a, LocaleString_ParentName = 0x0000006d, LocaleString_PercentSymbol = 0x00000076, LocaleString_PerMilleSymbol = 0x00000077 } LocaleStringData; PALEXPORT int32_t GlobalizationNative_GetLocaleInfoString(const UChar* localeName, LocaleStringData localeStringData, UChar* value, int32_t valueLength, const UChar* uiLocaleName); PALEXPORT int32_t GlobalizationNative_GetLocaleTimeFormat(const UChar* localeName, int shortFormat, UChar* value, int32_t valueLength);
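A sketch of calling GetLocaleInfoString as declared above, fetching the decimal separator for en-US. UChar strings here are null-terminated UTF-16 code units (ICU's convention); passing NULL for uiLocaleName is an assumption made for illustration, not a default documented by this header.

// Sketch of fetching a single locale string via the API declared above.
#include "pal_localeStringData.h" // assumed reachable on the include path

int32_t GetDecimalSeparatorExample(UChar* value, int32_t valueLength)
{
    static const UChar localeName[] = { 'e', 'n', '-', 'U', 'S', 0 };
    return GlobalizationNative_GetLocaleInfoString(localeName,
                                                   LocaleString_DecimalSeparator,
                                                   value, valueLength,
                                                   NULL /* uiLocaleName: assumed optional */);
}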
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/debug/ee/arm/primitives.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "threads.h" #include "../../shared/arm/primitives.cpp" void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc) { CONTEXT tmp; CopyRegDisplay(pSrc, pDst, &tmp); } void SetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->EnableSingleStep(); } void UnsetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->DisableSingleStep(); } // Check if single stepping is enabled. bool IsSSFlagEnabled(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); return pThread->IsSingleStepEnabled(); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "threads.h" #include "../../shared/arm/primitives.cpp" void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc) { CONTEXT tmp; CopyRegDisplay(pSrc, pDst, &tmp); } void SetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->EnableSingleStep(); } void UnsetSSFlag(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); pThread->DisableSingleStep(); } // Check if single stepping is enabled. bool IsSSFlagEnabled(DT_CONTEXT *, Thread *pThread) { _ASSERTE(pThread != NULL); return pThread->IsSingleStepEnabled(); }
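The helpers above expose enable/disable/query operations for per-thread single-stepping. Purely as an illustration (this guard type is hypothetical and not part of the runtime), an RAII wrapper keeps the enable and disable calls balanced across early returns:

// Hypothetical RAII guard over the single-step helpers above; illustrative only.
// Assumes Thread and the SetSSFlag/UnsetSSFlag declarations are in scope.
class SingleStepGuard
{
    Thread* m_pThread;
public:
    explicit SingleStepGuard(Thread* pThread) : m_pThread(pThread)
    {
        SetSSFlag(NULL, m_pThread);   // first argument is unused by these helpers
    }
    ~SingleStepGuard()
    {
        UnsetSSFlag(NULL, m_pThread);
    }
};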
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/coreclr/vm/stdinterfaces_wrapper.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //--------------------------------------------------------------------------------- // stdinterfaces_wrapper.cpp // // Defines various standard com interfaces //--------------------------------------------------------------------------------- #include "common.h" #include <ole2.h> #include <guidfromname.h> #include <olectl.h> #include <objsafe.h> // IID_IObjectSafe #include "vars.hpp" #include "object.h" #include "excep.h" #include "frames.h" #include "vars.hpp" #include "runtimecallablewrapper.h" #include "comcallablewrapper.h" #include "field.h" #include "threads.h" #include "interoputil.h" #include "comdelegate.h" #include "olevariant.h" #include "eeconfig.h" #include "typehandle.h" #include "posterror.h" #include <corerror.h> #include <mscoree.h> #include "mtx.h" #include "cgencpu.h" #include "interopconverter.h" #include "cominterfacemarshaler.h" #include "stdinterfaces.h" #include "stdinterfaces_internal.h" #include "interoputil.inl" interface IEnumConnectionPoints; // IUnknown is part of IDispatch // Common vtables for well-known COM interfaces // shared by all COM+ callable wrappers. // All Com+ created vtables have well known IUnknown methods, which is used to identify // the type of the interface // For e.g. all com+ created tear-offs have the same QI method in their IUnknown portion // Unknown_QueryInterface is the QI method for all the tear-offs created from COM+ // // Tearoff interfaces created for std. interfaces such as IProvideClassInfo, IErrorInfo etc. // have the AddRef & Release function point to Unknown_AddRefSpecial & Unknown_ReleaseSpecial // // Inner unknown, or the original unknown for a wrapper has // AddRef & Release point to a Unknown_AddRefInner & Unknown_ReleaseInner // global inner Unknown vtable const StdInterfaceDesc<3> g_InnerUnknown = { enum_InnerUnknown, { (UINT_PTR*)Unknown_QueryInterface, (UINT_PTR*)Unknown_AddRefInner, // special addref to distinguish inner unk (UINT_PTR*)Unknown_ReleaseInner, // special release to distinguish inner unknown } }; // global IProvideClassInfo vtable const StdInterfaceDesc<4> g_IProvideClassInfo = { enum_IProvideClassInfo, { (UINT_PTR*)Unknown_QueryInterface, // don't change this (UINT_PTR*)Unknown_AddRefSpecial, // special addref for std. interface (UINT_PTR*)Unknown_ReleaseSpecial, // special release for std. 
interface (UINT_PTR*)ClassInfo_GetClassInfo_Wrapper // GetClassInfo } }; // global IMarshal vtable const StdInterfaceDesc<9> g_IMarshal = { enum_IMarshal, { (UINT_PTR*)Unknown_QueryInterface, (UINT_PTR*)Unknown_AddRefSpecial, (UINT_PTR*)Unknown_ReleaseSpecial, (UINT_PTR*)Marshal_GetUnmarshalClass_Wrapper, (UINT_PTR*)Marshal_GetMarshalSizeMax_Wrapper, (UINT_PTR*)Marshal_MarshalInterface_Wrapper, (UINT_PTR*)Marshal_UnmarshalInterface_Wrapper, (UINT_PTR*)Marshal_ReleaseMarshalData_Wrapper, (UINT_PTR*)Marshal_DisconnectObject_Wrapper } }; // global ISupportsErrorInfo vtable const StdInterfaceDesc<4> g_ISupportsErrorInfo = { enum_ISupportsErrorInfo, { (UINT_PTR*)Unknown_QueryInterface, (UINT_PTR*)Unknown_AddRefSpecial, (UINT_PTR*)Unknown_ReleaseSpecial, (UINT_PTR*)SupportsErroInfo_IntfSupportsErrorInfo_Wrapper } }; // global IErrorInfo vtable const StdInterfaceDesc<8> g_IErrorInfo = { enum_IErrorInfo, { (UINT_PTR*)Unknown_QueryInterface_IErrorInfo, (UINT_PTR*)Unknown_AddRefSpecial, (UINT_PTR*)Unknown_ReleaseSpecial_IErrorInfo, (UINT_PTR*)ErrorInfo_GetGUID_Wrapper, (UINT_PTR*)ErrorInfo_GetSource_Wrapper, (UINT_PTR*)ErrorInfo_GetDescription_Wrapper, (UINT_PTR*)ErrorInfo_GetHelpFile_Wrapper, (UINT_PTR*)ErrorInfo_GetHelpContext_Wrapper } }; // global IConnectionPointContainer vtable const StdInterfaceDesc<5> g_IConnectionPointContainer = { enum_IConnectionPointContainer, { (UINT_PTR*)Unknown_QueryInterface, (UINT_PTR*)Unknown_AddRefSpecial, (UINT_PTR*)Unknown_ReleaseSpecial, (UINT_PTR*)ConnectionPointContainer_EnumConnectionPoints_Wrapper, (UINT_PTR*)ConnectionPointContainer_FindConnectionPoint_Wrapper } }; // global IObjectSafety vtable const StdInterfaceDesc<5> g_IObjectSafety = { enum_IObjectSafety, { (UINT_PTR*)Unknown_QueryInterface, (UINT_PTR*)Unknown_AddRefSpecial, (UINT_PTR*)Unknown_ReleaseSpecial, (UINT_PTR*)ObjectSafety_GetInterfaceSafetyOptions_Wrapper, (UINT_PTR*)ObjectSafety_SetInterfaceSafetyOptions_Wrapper } }; // global IDispatchEx vtable const StdInterfaceDesc<15> g_IDispatchEx = { enum_IDispatchEx, { (UINT_PTR*)Unknown_QueryInterface, (UINT_PTR*)Unknown_AddRefSpecial, (UINT_PTR*)Unknown_ReleaseSpecial, (UINT_PTR*)DispatchEx_GetTypeInfoCount_Wrapper, (UINT_PTR*)DispatchEx_GetTypeInfo_Wrapper, (UINT_PTR*)DispatchEx_GetIDsOfNames_Wrapper, (UINT_PTR*)DispatchEx_Invoke_Wrapper, (UINT_PTR*)DispatchEx_GetDispID_Wrapper, (UINT_PTR*)DispatchEx_InvokeEx_Wrapper, (UINT_PTR*)DispatchEx_DeleteMemberByName_Wrapper, (UINT_PTR*)DispatchEx_DeleteMemberByDispID_Wrapper, (UINT_PTR*)DispatchEx_GetMemberProperties_Wrapper, (UINT_PTR*)DispatchEx_GetMemberName_Wrapper, (UINT_PTR*)DispatchEx_GetNextDispID_Wrapper, (UINT_PTR*)DispatchEx_GetNameSpaceParent_Wrapper } }; // global IAgileObject vtable const StdInterfaceDesc<3> g_IAgileObject = { enum_IAgileObject, { (UINT_PTR*)Unknown_QueryInterface, (UINT_PTR*)Unknown_AddRefSpecial, (UINT_PTR*)Unknown_ReleaseSpecial } }; // Generic helper to check if AppDomain matches and perform a DoCallBack otherwise inline BOOL IsCurrentDomainValid(ComCallWrapper* pWrap, Thread* pThread) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; PRECONDITION(CheckPointer(pWrap)); PRECONDITION(CheckPointer(pThread)); } CONTRACTL_END; _ASSERTE(pWrap != NULL); PREFIX_ASSUME(pWrap != NULL); // If we are finalizing all alive objects, or after this stage, we do not allow // a thread to enter EE. 
if ((g_fEEShutDown & ShutDown_Finalize2) || g_fForbidEnterEE) return FALSE; return TRUE; } BOOL IsCurrentDomainValid(ComCallWrapper* pWrap) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; return IsCurrentDomainValid(pWrap, GetThread()); } struct AppDomainSwitchToPreemptiveHelperArgs { ADCallBackFcnType pRealCallback; void* pRealArgs; }; VOID __stdcall AppDomainSwitchToPreemptiveHelper(LPVOID pv) { AppDomainSwitchToPreemptiveHelperArgs* pArgs = (AppDomainSwitchToPreemptiveHelperArgs*)pv; CONTRACTL { GC_TRIGGERS; MODE_ANY; PRECONDITION(CheckPointer(pv)); VOID __stdcall Dispatch_Invoke_CallBack(LPVOID ptr); if (pArgs->pRealCallback == Dispatch_Invoke_CallBack) THROWS; else NOTHROW; } CONTRACTL_END; GCX_PREEMP(); pArgs->pRealCallback(pArgs->pRealArgs); } VOID AppDomainDoCallBack(ComCallWrapper* pWrap, ADCallBackFcnType pTarget, LPVOID pArgs, HRESULT* phr) { CONTRACTL { DISABLED(NOTHROW); GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pWrap)); PRECONDITION(CheckPointer(pTarget)); PRECONDITION(CheckPointer(pArgs)); PRECONDITION(CheckPointer(phr)); } CONTRACTL_END; // If we are finalizing all alive objects, or after this stage, we do not allow // a thread to enter EE. if ((g_fEEShutDown & ShutDown_Finalize2) || g_fForbidEnterEE) { *phr = E_FAIL; return; } BEGIN_EXTERNAL_ENTRYPOINT(phr) { // make the call directly not forgetting to switch to preemptive GC mode GCX_PREEMP(); ((ADCallBackFcnType)pTarget)(pArgs); } END_EXTERNAL_ENTRYPOINT; } //------------------------------------------------------------------------- // IUnknown methods struct QIArgs { ComCallWrapper* pWrap; IUnknown* pUnk; const IID* riid; void** ppv; HRESULT* hr; }; VOID __stdcall Unknown_QueryInterface_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; QIArgs* pArgs = (QIArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Unknown_QueryInterface_Internal(pArgs->pWrap, pArgs->pUnk, *pArgs->riid, pArgs->ppv); } else { AppDomainDoCallBack(pWrap, Unknown_QueryInterface_CallBack, pArgs, pArgs->hr);; } } HRESULT __stdcall Unknown_QueryInterface(IUnknown* pUnk, REFIID riid, void** ppv) { SetupThreadForComCall(E_OUTOFMEMORY); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(ppv, NULL_OK)); } CONTRACTL_END; ComCallWrapper* pWrap = MapIUnknownToWrapper(pUnk); if (IsCurrentDomainValid(pWrap, GET_THREAD())) { return Unknown_QueryInterface_Internal(pWrap, pUnk, riid, ppv); } else { HRESULT hr = S_OK; QIArgs args = {pWrap, pUnk, &riid, ppv, &hr}; Unknown_QueryInterface_CallBack(&args); return hr; } } struct AddRefReleaseArgs { IUnknown* pUnk; ULONG* pLong; HRESULT* hr; }; ULONG __stdcall Unknown_AddRef(IUnknown* pUnk) { // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of // the other "entering managed code" work like checking for reentrancy. // We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so // all of that stuff should be isolated to rare paths here. 
SetupThreadForComCall(-1); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; ENTRY_POINT; } CONTRACTL_END; // Allow addrefs to go through, coz we are allowing // all releases to go through, otherwise we would // have a mismatch of ref-counts return Unknown_AddRef_Internal(pUnk); } ULONG __stdcall Unknown_Release(IUnknown* pUnk) { // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of // the other "entering managed code" work like checking for reentrancy. // We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so // all of that stuff should be isolated to rare paths here. SetupThreadForComCall(-1); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; ENTRY_POINT; } CONTRACTL_END; // Don't switch domains since we need to allow release calls to go through // even after the AD has been unlaoded. Furthermore release doesn't require // us to transition into the domain to work properly. return Unknown_Release_Internal(pUnk); } ULONG __stdcall Unknown_AddRefInner(IUnknown* pUnk) { // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of // the other "entering managed code" work like checking for reentrancy. // We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so // all of that stuff should be isolated to rare paths here. SetupThreadForComCall(-1); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; ENTRY_POINT; } CONTRACTL_END; // Allow addrefs to go through, coz we are allowing // all releases to go through, otherwise we would // have a mismatch of ref-counts return Unknown_AddRefInner_Internal(pUnk); } ULONG __stdcall Unknown_ReleaseInner(IUnknown* pUnk) { // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of // the other "entering managed code" work like checking for reentrancy. // We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so // all of that stuff should be isolated to rare paths here. SetupThreadForComCall(-1); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; ENTRY_POINT; } CONTRACTL_END; // Don't switch domains since we need to allow release calls to go through // even after the AD has been unlaoded. Furthermore release doesn't require // us to transition into the domain to work properly. return Unknown_ReleaseInner_Internal(pUnk); } ULONG __stdcall Unknown_AddRefSpecial(IUnknown* pUnk) { // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of // the other "entering managed code" work like checking for reentrancy. // We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so // all of that stuff should be isolated to rare paths here. SetupThreadForComCall(-1); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; ENTRY_POINT; } CONTRACTL_END; // Allow addrefs to go through, coz we are allowing // all releases to go through, otherwise we would // have a mismatch of ref-counts return Unknown_AddRefSpecial_Internal(pUnk); } ULONG __stdcall Unknown_ReleaseSpecial(IUnknown* pUnk) { // Ensure the Thread is available for contracts and other users of the Thread, but don't do any of // the other "entering managed code" work like checking for reentrancy. // We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so // all of that stuff should be isolated to rare paths here. 
SetupThreadForComCall(-1); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; ENTRY_POINT; } CONTRACTL_END; // Don't switch domains since we need to allow release calls to go through // even after the AD has been unloaded. Furthermore, release doesn't require // us to transition into the domain to work properly. return Unknown_ReleaseSpecial_Internal(pUnk); } HRESULT __stdcall Unknown_QueryInterface_IErrorInfo(IUnknown* pUnk, REFIID riid, void** ppv) { SetupForComCallHR(); WRAPPER_NO_CONTRACT; // otherwise do a regular QI return Unknown_QueryInterface(pUnk, riid, ppv); } // --------------------------------------------------------------------------- // Release for IErrorInfo that takes into account that this can be called // while holding the loader lock // --------------------------------------------------------------------------- ULONG __stdcall Unknown_ReleaseSpecial_IErrorInfo(IUnknown* pUnk) { SetupForComCallDWORD(); WRAPPER_NO_CONTRACT; CONTRACT_VIOLATION(GCViolation); // Don't switch domains since we need to allow release calls to go through // even after the AD has been unloaded. Furthermore, release doesn't require // us to transition into the domain to work properly. return Unknown_ReleaseSpecial_IErrorInfo_Internal(pUnk); } //------------------------------------------------------------------------- // IProvideClassInfo methods struct GetClassInfoArgs { IUnknown* pUnk; ITypeInfo** ppTI; // Address of the output variable that receives the type info. HRESULT* hr; }; VOID __stdcall ClassInfo_GetClassInfo_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetClassInfoArgs* pArgs = (GetClassInfoArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ClassInfo_GetClassInfo(pArgs->pUnk, pArgs->ppTI); } else { AppDomainDoCallBack(pWrap, ClassInfo_GetClassInfo_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ClassInfo_GetClassInfo_Wrapper(IUnknown* pUnk, ITypeInfo** ppTI) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(ppTI, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetClassInfoArgs args = {pUnk, ppTI, &hr}; ClassInfo_GetClassInfo_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface ISupportsErrorInfo struct IntfSupportsErrorInfoArgs { IUnknown* pUnk; const IID* riid; HRESULT* hr; }; VOID __stdcall SupportsErroInfo_IntfSupportsErrorInfo_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; IntfSupportsErrorInfoArgs* pArgs = (IntfSupportsErrorInfoArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = SupportsErroInfo_IntfSupportsErrorInfo(pArgs->pUnk, *pArgs->riid); } else { AppDomainDoCallBack(pWrap, SupportsErroInfo_IntfSupportsErrorInfo_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall SupportsErroInfo_IntfSupportsErrorInfo_Wrapper(IUnknown* pUnk, REFIID riid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); } CONTRACTL_END; HRESULT hr = S_OK; IntfSupportsErrorInfoArgs args = {pUnk, &riid, &hr}; SupportsErroInfo_IntfSupportsErrorInfo_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IErrorInfo struct GetDescriptionArgs { IUnknown* pUnk;
BSTR* pbstrDescription; HRESULT* hr; }; VOID __stdcall ErrorInfo_GetDescription_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetDescriptionArgs* pArgs = (GetDescriptionArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ErrorInfo_GetDescription(pArgs->pUnk, pArgs->pbstrDescription); } else { AppDomainDoCallBack(pWrap, ErrorInfo_GetDescription_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ErrorInfo_GetDescription_Wrapper(IUnknown* pUnk, BSTR* pbstrDescription) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pbstrDescription, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetDescriptionArgs args = {pUnk, pbstrDescription, &hr}; ErrorInfo_GetDescription_CallBack(&args); return hr; } struct GetGUIDArgs { IUnknown* pUnk; GUID* pguid; HRESULT* hr; }; VOID __stdcall ErrorInfo_GetGUID_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetGUIDArgs* pArgs = (GetGUIDArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ErrorInfo_GetGUID(pArgs->pUnk, pArgs->pguid); } else { AppDomainDoCallBack(pWrap, ErrorInfo_GetGUID_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ErrorInfo_GetGUID_Wrapper(IUnknown* pUnk, GUID* pguid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pguid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetGUIDArgs args = {pUnk, pguid, &hr}; ErrorInfo_GetGUID_CallBack(&args); return hr; } struct GetHelpContextArgs { IUnknown* pUnk; DWORD* pdwHelpCtxt; HRESULT* hr; }; VOID __stdcall ErrorInfo_GetHelpContext_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetHelpContextArgs* pArgs = (GetHelpContextArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ErrorInfo_GetHelpContext(pArgs->pUnk, pArgs->pdwHelpCtxt); } else { AppDomainDoCallBack(pWrap, ErrorInfo_GetHelpContext_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ErrorInfo_GetHelpContext_Wrapper(IUnknown* pUnk, DWORD* pdwHelpCtxt) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pdwHelpCtxt, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetHelpContextArgs args = {pUnk, pdwHelpCtxt, &hr}; ErrorInfo_GetHelpContext_CallBack(&args); return hr; } struct GetHelpFileArgs { IUnknown* pUnk; BSTR* pbstrHelpFile; HRESULT* hr; }; VOID __stdcall ErrorInfo_GetHelpFile_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetHelpFileArgs* pArgs = (GetHelpFileArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ErrorInfo_GetHelpFile(pArgs->pUnk, pArgs->pbstrHelpFile); } else { AppDomainDoCallBack(pWrap, ErrorInfo_GetHelpFile_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ErrorInfo_GetHelpFile_Wrapper(IUnknown* pUnk, BSTR* pbstrHelpFile) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pbstrHelpFile, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK;
GetHelpFileArgs args = {pUnk, pbstrHelpFile, &hr}; ErrorInfo_GetHelpFile_CallBack(&args); return hr; } struct GetSourceArgs { IUnknown* pUnk; BSTR* pbstrSource; HRESULT* hr; }; VOID __stdcall ErrorInfo_GetSource_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetSourceArgs* pArgs = (GetSourceArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ErrorInfo_GetSource(pArgs->pUnk, pArgs->pbstrSource); } else { AppDomainDoCallBack(pWrap, ErrorInfo_GetSource_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ErrorInfo_GetSource_Wrapper(IUnknown* pUnk, BSTR* pbstrSource) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pbstrSource, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetSourceArgs args = {pUnk, pbstrSource, &hr}; ErrorInfo_GetSource_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IDispatch // // IDispatch methods for COM+ objects. These methods dispatch to the // appropriate implementation based on the flags of the class that // implements them. struct GetTypeInfoCountArgs { IDispatch* pUnk; unsigned int *pctinfo; HRESULT* hr; }; VOID __stdcall Dispatch_GetTypeInfoCount_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetTypeInfoCountArgs* pArgs = (GetTypeInfoCountArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Dispatch_GetTypeInfoCount(pArgs->pUnk, pArgs->pctinfo); } else { AppDomainDoCallBack(pWrap, Dispatch_GetTypeInfoCount_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Dispatch_GetTypeInfoCount_Wrapper(IDispatch* pDisp, unsigned int *pctinfo) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pctinfo, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetTypeInfoCountArgs args = {pDisp, pctinfo, &hr}; Dispatch_GetTypeInfoCount_CallBack(&args); return hr; } struct GetTypeInfoArgs { IDispatch* pUnk; unsigned int itinfo; LCID lcid; ITypeInfo **pptinfo; HRESULT* hr; }; VOID __stdcall Dispatch_GetTypeInfo_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetTypeInfoArgs* pArgs = (GetTypeInfoArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Dispatch_GetTypeInfo(pArgs->pUnk, pArgs->itinfo, pArgs->lcid, pArgs->pptinfo); } else { AppDomainDoCallBack(pWrap, Dispatch_GetTypeInfo_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Dispatch_GetTypeInfo_Wrapper(IDispatch* pDisp, unsigned int itinfo, LCID lcid, ITypeInfo **pptinfo) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pptinfo, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetTypeInfoArgs args = {pDisp, itinfo, lcid, pptinfo, &hr}; Dispatch_GetTypeInfo_CallBack(&args); return hr; } struct GetIDsOfNamesArgs { IDispatch* pUnk; const IID* riid; OLECHAR **rgszNames; unsigned int cNames; LCID lcid; DISPID *rgdispid; HRESULT* hr; }; VOID __stdcall Dispatch_GetIDsOfNames_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END;
GetIDsOfNamesArgs* pArgs = (GetIDsOfNamesArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Dispatch_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames, pArgs->cNames, pArgs->lcid, pArgs->rgdispid); } else { AppDomainDoCallBack(pWrap, Dispatch_GetIDsOfNames_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Dispatch_GetIDsOfNames_Wrapper(IDispatch* pDisp, REFIID riid, _In_reads_(cNames) OLECHAR **rgszNames, unsigned int cNames, LCID lcid, DISPID *rgdispid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(rgszNames, NULL_OK)); PRECONDITION(CheckPointer(rgdispid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetIDsOfNamesArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr}; Dispatch_GetIDsOfNames_CallBack(&args); return hr; } VOID __stdcall InternalDispatchImpl_GetIDsOfNames_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetIDsOfNamesArgs* pArgs = (GetIDsOfNamesArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = InternalDispatchImpl_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames, pArgs->cNames, pArgs->lcid, pArgs->rgdispid); } else { AppDomainDoCallBack(pWrap, InternalDispatchImpl_GetIDsOfNames_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall InternalDispatchImpl_GetIDsOfNames_Wrapper(IDispatch* pDisp, REFIID riid, _In_reads_(cNames) OLECHAR **rgszNames, unsigned int cNames, LCID lcid, DISPID *rgdispid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(rgszNames, NULL_OK)); PRECONDITION(CheckPointer(rgdispid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetIDsOfNamesArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr}; InternalDispatchImpl_GetIDsOfNames_CallBack(&args); return hr; } struct InvokeArgs { IDispatch* pUnk; DISPID dispidMember; const IID* riid; LCID lcid; unsigned short wFlags; DISPPARAMS *pdispparams; VARIANT *pvarResult; EXCEPINFO *pexcepinfo; unsigned int *puArgErr; HRESULT* hr; }; VOID __stdcall Dispatch_Invoke_CallBack(LPVOID ptr) { CONTRACTL { THROWS; // Dispatch_Invoke can throw GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; InvokeArgs* pArgs = (InvokeArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Dispatch_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid, pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult, pArgs->pexcepinfo, pArgs->puArgErr); } else { AppDomainDoCallBack(pWrap, Dispatch_Invoke_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Dispatch_Invoke_Wrapper(IDispatch* pDisp, DISPID dispidMember, REFIID riid, LCID lcid, unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult, EXCEPINFO *pexcepinfo, unsigned int *puArgErr) { HRESULT hrRetVal = S_OK; SetupForComCallHR(); CONTRACTL { THROWS; // Dispatch_Invoke_CallBack can throw GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdispparams, NULL_OK)); PRECONDITION(CheckPointer(pvarResult, NULL_OK)); PRECONDITION(CheckPointer(pexcepinfo, NULL_OK)); PRECONDITION(CheckPointer(puArgErr, NULL_OK)); } CONTRACTL_END; InvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, 
puArgErr, &hrRetVal}; Dispatch_Invoke_CallBack(&args); return hrRetVal; } VOID __stdcall InternalDispatchImpl_Invoke_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; InvokeArgs* pArgs = (InvokeArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = InternalDispatchImpl_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid, pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult, pArgs->pexcepinfo, pArgs->puArgErr); } else { AppDomainDoCallBack(pWrap, InternalDispatchImpl_Invoke_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall InternalDispatchImpl_Invoke_Wrapper(IDispatch* pDisp, DISPID dispidMember, REFIID riid, LCID lcid, unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult, EXCEPINFO *pexcepinfo, unsigned int *puArgErr) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdispparams, NULL_OK)); PRECONDITION(CheckPointer(pvarResult, NULL_OK)); PRECONDITION(CheckPointer(pexcepinfo, NULL_OK)); PRECONDITION(CheckPointer(puArgErr, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; InvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr, &hr}; InternalDispatchImpl_Invoke_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IDispatchEx struct GetTypeInfoCountExArgs { IDispatchEx* pUnk; unsigned int *pctinfo; HRESULT* hr; }; VOID __stdcall DispatchEx_GetTypeInfoCount_CallBack (LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetTypeInfoCountExArgs* pArgs = (GetTypeInfoCountExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetTypeInfoCount(pArgs->pUnk, pArgs->pctinfo); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetTypeInfoCount_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetTypeInfoCount_Wrapper(IDispatchEx* pDisp, unsigned int *pctinfo) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pctinfo, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetTypeInfoCountExArgs args = {pDisp, pctinfo, &hr}; DispatchEx_GetTypeInfoCount_CallBack(&args); return hr; } struct GetTypeInfoExArgs { IDispatch* pUnk; unsigned int itinfo; LCID lcid; ITypeInfo **pptinfo; HRESULT* hr; }; VOID __stdcall DispatchEx_GetTypeInfo_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetTypeInfoExArgs* pArgs = (GetTypeInfoExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetTypeInfo(pArgs->pUnk, pArgs->itinfo, pArgs->lcid, pArgs->pptinfo); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetTypeInfo_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetTypeInfo_Wrapper(IDispatchEx* pDisp, unsigned int itinfo, LCID lcid, ITypeInfo **pptinfo) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pptinfo, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetTypeInfoExArgs args = {pDisp, itinfo, lcid, pptinfo, &hr}; DispatchEx_GetTypeInfo_CallBack(&args); return hr; } struct 
GetIDsOfNamesExArgs { IDispatchEx* pUnk; const IID* riid; OLECHAR **rgszNames; unsigned int cNames; LCID lcid; DISPID *rgdispid; HRESULT* hr; }; VOID __stdcall DispatchEx_GetIDsOfNames_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetIDsOfNamesExArgs* pArgs = (GetIDsOfNamesExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames, pArgs->cNames, pArgs->lcid, pArgs->rgdispid); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetIDsOfNames_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetIDsOfNames_Wrapper(IDispatchEx* pDisp, REFIID riid, _In_reads_(cNames) OLECHAR **rgszNames, unsigned int cNames, LCID lcid, DISPID *rgdispid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(rgszNames, NULL_OK)); PRECONDITION(CheckPointer(rgdispid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetIDsOfNamesExArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr}; DispatchEx_GetIDsOfNames_CallBack(&args); return hr; } struct DispExInvokeArgs { IDispatchEx* pUnk; DISPID dispidMember; const IID* riid; LCID lcid; unsigned short wFlags; DISPPARAMS *pdispparams; VARIANT *pvarResult; EXCEPINFO *pexcepinfo; unsigned int *puArgErr; HRESULT* hr; }; VOID __stdcall DispatchEx_Invoke_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DispExInvokeArgs* pArgs = (DispExInvokeArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid, pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult, pArgs->pexcepinfo, pArgs->puArgErr); } else { AppDomainDoCallBack(pWrap, DispatchEx_Invoke_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_Invoke_Wrapper(IDispatchEx* pDisp, DISPID dispidMember, REFIID riid, LCID lcid, unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult, EXCEPINFO *pexcepinfo, unsigned int *puArgErr) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdispparams, NULL_OK)); PRECONDITION(CheckPointer(pvarResult, NULL_OK)); PRECONDITION(CheckPointer(pexcepinfo, NULL_OK)); PRECONDITION(CheckPointer(puArgErr, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; DispExInvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr, &hr}; DispatchEx_Invoke_CallBack(&args); return hr; } struct DeleteMemberByDispIDArgs { IDispatchEx* pDisp; DISPID id; HRESULT* hr; }; VOID __stdcall DispatchEx_DeleteMemberByDispID_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DeleteMemberByDispIDArgs* pArgs = (DeleteMemberByDispIDArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_DeleteMemberByDispID(pArgs->pDisp, pArgs->id); } else { AppDomainDoCallBack(pWrap, DispatchEx_DeleteMemberByDispID_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_DeleteMemberByDispID_Wrapper(IDispatchEx* pDisp, DISPID id) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); } 
CONTRACTL_END; HRESULT hr = S_OK; DeleteMemberByDispIDArgs args = {pDisp, id, &hr}; DispatchEx_DeleteMemberByDispID_CallBack(&args); return hr; } struct DeleteMemberByNameArgs { IDispatchEx* pDisp; BSTR bstrName; DWORD grfdex; HRESULT* hr; }; VOID __stdcall DispatchEx_DeleteMemberByName_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DeleteMemberByNameArgs* pArgs = (DeleteMemberByNameArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_DeleteMemberByName(pArgs->pDisp, pArgs->bstrName, pArgs->grfdex); } else { AppDomainDoCallBack(pWrap, DispatchEx_DeleteMemberByName_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_DeleteMemberByName_Wrapper(IDispatchEx* pDisp, BSTR bstrName, DWORD grfdex) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); } CONTRACTL_END; HRESULT hr = S_OK; DeleteMemberByNameArgs args = {pDisp, bstrName, grfdex, &hr}; DispatchEx_DeleteMemberByName_CallBack(&args); return hr; } struct GetMemberNameArgs { IDispatchEx* pDisp; DISPID id; BSTR *pbstrName; HRESULT* hr; }; VOID __stdcall DispatchEx_GetMemberName_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetMemberNameArgs* pArgs = (GetMemberNameArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetMemberName(pArgs->pDisp, pArgs->id, pArgs->pbstrName); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetMemberName_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetMemberName_Wrapper(IDispatchEx* pDisp, DISPID id, BSTR *pbstrName) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pbstrName, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetMemberNameArgs args = {pDisp, id, pbstrName, &hr}; DispatchEx_GetMemberName_CallBack(&args); return hr; } struct GetDispIDArgs { IDispatchEx* pDisp; BSTR bstrName; DWORD grfdex; DISPID *pid; HRESULT* hr; }; VOID __stdcall DispatchEx_GetDispID_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetDispIDArgs* pArgs = (GetDispIDArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetDispID(pArgs->pDisp, pArgs->bstrName, pArgs->grfdex, pArgs->pid); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetDispID_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetDispID_Wrapper(IDispatchEx* pDisp, BSTR bstrName, DWORD grfdex, DISPID *pid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetDispIDArgs args = {pDisp, bstrName, grfdex, pid, &hr}; DispatchEx_GetDispID_CallBack(&args); return hr; } struct GetMemberPropertiesArgs { IDispatchEx* pDisp; DISPID id; DWORD grfdexFetch; DWORD *pgrfdex; HRESULT* hr; }; VOID __stdcall DispatchEx_GetMemberProperties_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetMemberPropertiesArgs* pArgs = (GetMemberPropertiesArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { 
*(pArgs->hr) = DispatchEx_GetMemberProperties(pArgs->pDisp, pArgs->id, pArgs->grfdexFetch, pArgs->pgrfdex); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetMemberProperties_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetMemberProperties_Wrapper(IDispatchEx* pDisp, DISPID id, DWORD grfdexFetch, DWORD *pgrfdex) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pgrfdex, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetMemberPropertiesArgs args = {pDisp, id, grfdexFetch, pgrfdex, &hr}; DispatchEx_GetMemberProperties_CallBack(&args); return hr; } struct GetNameSpaceParentArgs { IDispatchEx* pDisp; IUnknown **ppunk; HRESULT* hr; }; VOID __stdcall DispatchEx_GetNameSpaceParent_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetNameSpaceParentArgs* pArgs = (GetNameSpaceParentArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetNameSpaceParent(pArgs->pDisp, pArgs->ppunk); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetNameSpaceParent_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetNameSpaceParent_Wrapper(IDispatchEx* pDisp, IUnknown **ppunk) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(ppunk, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetNameSpaceParentArgs args = {pDisp, ppunk, &hr}; DispatchEx_GetNameSpaceParent_CallBack(&args); return hr; } struct GetNextDispIDArgs { IDispatchEx* pDisp; DWORD grfdex; DISPID id; DISPID *pid; HRESULT* hr; }; VOID __stdcall DispatchEx_GetNextDispID_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetNextDispIDArgs* pArgs = (GetNextDispIDArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetNextDispID(pArgs->pDisp, pArgs->grfdex, pArgs->id, pArgs->pid); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetNextDispID_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetNextDispID_Wrapper(IDispatchEx* pDisp, DWORD grfdex, DISPID id, DISPID *pid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetNextDispIDArgs args = {pDisp, grfdex, id, pid, &hr}; DispatchEx_GetNextDispID_CallBack(&args); return hr; } struct DispExInvokeExArgs { IDispatchEx* pDisp; DISPID id; LCID lcid; WORD wFlags; DISPPARAMS *pdp; VARIANT *pVarRes; EXCEPINFO *pei; IServiceProvider *pspCaller; HRESULT* hr; }; VOID __stdcall DispatchEx_InvokeEx_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DispExInvokeExArgs* pArgs = (DispExInvokeExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_InvokeEx(pArgs->pDisp, pArgs->id, pArgs->lcid, pArgs->wFlags, pArgs->pdp, pArgs->pVarRes, pArgs->pei, pArgs->pspCaller); } else { AppDomainDoCallBack(pWrap, DispatchEx_InvokeEx_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_InvokeEx_Wrapper(IDispatchEx* pDisp, DISPID id, LCID lcid, WORD wFlags, DISPPARAMS *pdp, VARIANT *pVarRes, EXCEPINFO *pei, IServiceProvider *pspCaller) { 
SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdp, NULL_OK)); PRECONDITION(CheckPointer(pVarRes, NULL_OK)); PRECONDITION(CheckPointer(pei, NULL_OK)); PRECONDITION(CheckPointer(pspCaller, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; DispExInvokeExArgs args = {pDisp, id, lcid, wFlags, pdp, pVarRes, pei, pspCaller, &hr}; DispatchEx_InvokeEx_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IMarshal struct GetUnmarshalClassArgs { IMarshal* pUnk; const IID* riid; void * pv; ULONG dwDestContext; void * pvDestContext; ULONG mshlflags; LPCLSID pclsid; HRESULT* hr; }; VOID __stdcall Marshal_GetUnmarshalClass_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetUnmarshalClassArgs* pArgs = (GetUnmarshalClassArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_GetUnmarshalClass(pArgs->pUnk, *(pArgs->riid), pArgs->pv, pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags, pArgs->pclsid); } else { AppDomainDoCallBack(pWrap, Marshal_GetUnmarshalClass_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_GetUnmarshalClass_Wrapper(IMarshal* pMarsh, REFIID riid, void * pv, ULONG dwDestContext, void * pvDestContext, ULONG mshlflags, LPCLSID pclsid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pv, NULL_OK)); PRECONDITION(CheckPointer(pvDestContext, NULL_OK)); PRECONDITION(CheckPointer(pclsid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetUnmarshalClassArgs args = {pMarsh, &riid, pv, dwDestContext, pvDestContext, mshlflags, pclsid, &hr}; Marshal_GetUnmarshalClass_CallBack(&args); return hr; } struct GetMarshalSizeMaxArgs { IMarshal* pUnk; const IID* riid; void * pv; ULONG dwDestContext; void * pvDestContext; ULONG mshlflags; ULONG * pSize; HRESULT* hr; }; VOID __stdcall Marshal_GetMarshalSizeMax_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetMarshalSizeMaxArgs* pArgs = (GetMarshalSizeMaxArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_GetMarshalSizeMax(pArgs->pUnk, *(pArgs->riid), pArgs->pv, pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags, pArgs->pSize); } else { AppDomainDoCallBack(pWrap, Marshal_GetMarshalSizeMax_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_GetMarshalSizeMax_Wrapper(IMarshal* pMarsh, REFIID riid, void * pv, ULONG dwDestContext, void * pvDestContext, ULONG mshlflags, ULONG * pSize) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pv, NULL_OK)); PRECONDITION(CheckPointer(pvDestContext, NULL_OK)); PRECONDITION(CheckPointer(pSize, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetMarshalSizeMaxArgs args = {pMarsh, &riid, pv, dwDestContext, pvDestContext, mshlflags, pSize, &hr}; Marshal_GetMarshalSizeMax_CallBack(&args); return hr; } struct MarshalInterfaceArgs { IMarshal* pUnk; LPSTREAM pStm; const IID* riid; void * pv; ULONG dwDestContext; void * pvDestContext; ULONG mshlflags; HRESULT* hr; }; VOID __stdcall Marshal_MarshalInterface_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; 
PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; MarshalInterfaceArgs* pArgs = (MarshalInterfaceArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_MarshalInterface(pArgs->pUnk, pArgs->pStm, *(pArgs->riid), pArgs->pv, pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags); } else { AppDomainDoCallBack(pWrap, Marshal_MarshalInterface_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_MarshalInterface_Wrapper(IMarshal* pMarsh, LPSTREAM pStm, REFIID riid, void * pv, ULONG dwDestContext, LPVOID pvDestContext, ULONG mshlflags) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pv, NULL_OK)); PRECONDITION(CheckPointer(pvDestContext, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; MarshalInterfaceArgs args = {pMarsh, pStm, &riid, pv, dwDestContext, pvDestContext, mshlflags, &hr}; Marshal_MarshalInterface_CallBack(&args); return hr; } struct UnmarshalInterfaceArgs { IMarshal* pUnk; LPSTREAM pStm; const IID* riid; void ** ppvObj; HRESULT* hr; }; VOID __stdcall Marshal_UnmarshalInterface_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; UnmarshalInterfaceArgs* pArgs = (UnmarshalInterfaceArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_UnmarshalInterface(pArgs->pUnk, pArgs->pStm, *(pArgs->riid), pArgs->ppvObj); } else { AppDomainDoCallBack(pWrap, Marshal_UnmarshalInterface_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_UnmarshalInterface_Wrapper(IMarshal* pMarsh, LPSTREAM pStm, REFIID riid, void ** ppvObj) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pStm, NULL_OK)); PRECONDITION(CheckPointer(ppvObj, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; UnmarshalInterfaceArgs args = {pMarsh, pStm, &riid, ppvObj, &hr}; Marshal_UnmarshalInterface_CallBack(&args); return hr; } struct ReleaseMarshalDataArgs { IMarshal* pUnk; LPSTREAM pStm; HRESULT* hr; }; VOID __stdcall Marshal_ReleaseMarshalData_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; ReleaseMarshalDataArgs* pArgs = (ReleaseMarshalDataArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_ReleaseMarshalData(pArgs->pUnk, pArgs->pStm); } else { AppDomainDoCallBack(pWrap, Marshal_ReleaseMarshalData_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_ReleaseMarshalData_Wrapper(IMarshal* pMarsh, LPSTREAM pStm) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pStm, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; ReleaseMarshalDataArgs args = {pMarsh, pStm, &hr}; Marshal_ReleaseMarshalData_CallBack(&args); return hr; } struct DisconnectObjectArgs { IMarshal* pUnk; ULONG dwReserved; HRESULT* hr; }; VOID __stdcall Marshal_DisconnectObject_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DisconnectObjectArgs* pArgs = (DisconnectObjectArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_DisconnectObject(pArgs->pUnk, pArgs->dwReserved); } else { 
AppDomainDoCallBack(pWrap, Marshal_DisconnectObject_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_DisconnectObject_Wrapper(IMarshal* pMarsh, ULONG dwReserved) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); } CONTRACTL_END; HRESULT hr = S_OK; DisconnectObjectArgs args = {pMarsh, dwReserved, &hr}; Marshal_DisconnectObject_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IConnectionPointContainer struct EnumConnectionPointsArgs { IUnknown* pUnk; IEnumConnectionPoints **ppEnum; HRESULT* hr; }; VOID __stdcall ConnectionPointContainer_EnumConnectionPoints_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; EnumConnectionPointsArgs* pArgs = (EnumConnectionPointsArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ConnectionPointContainer_EnumConnectionPoints(pArgs->pUnk, pArgs->ppEnum); } else { AppDomainDoCallBack(pWrap, ConnectionPointContainer_EnumConnectionPoints_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ConnectionPointContainer_EnumConnectionPoints_Wrapper(IUnknown* pUnk, IEnumConnectionPoints **ppEnum) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(ppEnum, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; EnumConnectionPointsArgs args = {pUnk, ppEnum, &hr}; ConnectionPointContainer_EnumConnectionPoints_CallBack(&args); return hr; } struct FindConnectionPointArgs { IUnknown* pUnk; const IID* riid; IConnectionPoint **ppCP; HRESULT* hr; }; VOID __stdcall ConnectionPointContainer_FindConnectionPoint_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; FindConnectionPointArgs* pArgs = (FindConnectionPointArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ConnectionPointContainer_FindConnectionPoint(pArgs->pUnk, *(pArgs->riid), pArgs->ppCP); } else { AppDomainDoCallBack(pWrap, ConnectionPointContainer_FindConnectionPoint_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ConnectionPointContainer_FindConnectionPoint_Wrapper(IUnknown* pUnk, REFIID riid, IConnectionPoint **ppCP) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(ppCP, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; FindConnectionPointArgs args = {pUnk, &riid, ppCP, &hr}; ConnectionPointContainer_FindConnectionPoint_CallBack(&args); return hr; } //------------------------------------------------------------------------------------------ // IObjectSafety methods for COM+ objects struct GetInterfaceSafetyArgs { IUnknown* pUnk; const IID* riid; DWORD *pdwSupportedOptions; DWORD *pdwEnabledOptions; HRESULT* hr; }; VOID __stdcall ObjectSafety_GetInterfaceSafetyOptions_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetInterfaceSafetyArgs* pArgs = (GetInterfaceSafetyArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ObjectSafety_GetInterfaceSafetyOptions(pArgs->pUnk, *(pArgs->riid), pArgs->pdwSupportedOptions, pArgs->pdwEnabledOptions); } else { AppDomainDoCallBack(pWrap, 
ObjectSafety_GetInterfaceSafetyOptions_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ObjectSafety_GetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk, REFIID riid, DWORD *pdwSupportedOptions, DWORD *pdwEnabledOptions) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pdwSupportedOptions, NULL_OK)); PRECONDITION(CheckPointer(pdwEnabledOptions, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetInterfaceSafetyArgs args = {pUnk, &riid, pdwSupportedOptions, pdwEnabledOptions, &hr}; ObjectSafety_GetInterfaceSafetyOptions_CallBack(&args); return hr; } struct SetInterfaceSafetyArgs { IUnknown* pUnk; const IID* riid; DWORD dwOptionSetMask; DWORD dwEnabledOptions; HRESULT* hr; }; VOID __stdcall ObjectSafety_SetInterfaceSafetyOptions_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; SetInterfaceSafetyArgs* pArgs = (SetInterfaceSafetyArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ObjectSafety_SetInterfaceSafetyOptions(pArgs->pUnk, *(pArgs->riid), pArgs->dwOptionSetMask, pArgs->dwEnabledOptions ); } else { AppDomainDoCallBack(pWrap, ObjectSafety_SetInterfaceSafetyOptions_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ObjectSafety_SetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk, REFIID riid, DWORD dwOptionSetMask, DWORD dwEnabledOptions) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); } CONTRACTL_END; HRESULT hr = S_OK; SetInterfaceSafetyArgs args = {pUnk, &riid, dwOptionSetMask, dwEnabledOptions, &hr}; ObjectSafety_SetInterfaceSafetyOptions_CallBack(&args); return hr; }
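// ---------------------------------------------------------------------------
// Every exposed method in this file follows the same three-piece pattern: an
// args struct that packages the parameters plus an HRESULT out-slot, a
// *_CallBack that runs the real implementation directly when
// IsCurrentDomainValid(pWrap) succeeds and otherwise re-dispatches itself via
// AppDomainDoCallBack, and a *_Wrapper that sits in the vtable, calls
// SetupForComCallHR(), and funnels into the callback. A minimal sketch of how
// a new method slot would be added is below; the Frob* names are hypothetical
// and exist only for illustration (contracts and preconditions omitted for
// brevity):
//
//   struct FrobArgs { IUnknown* pUnk; DWORD dwFlags; HRESULT* hr; };
//
//   VOID __stdcall Frob_CallBack(LPVOID ptr)
//   {
//       FrobArgs* pArgs = (FrobArgs*)ptr;
//       ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk);
//       if (IsCurrentDomainValid(pWrap))
//           *(pArgs->hr) = Frob_Internal(pArgs->pUnk, pArgs->dwFlags);   // hypothetical _Internal
//       else
//           AppDomainDoCallBack(pWrap, Frob_CallBack, pArgs, pArgs->hr); // re-dispatch to self
//   }
//
//   HRESULT __stdcall Frob_Wrapper(IUnknown* pUnk, DWORD dwFlags)
//   {
//       SetupForComCallHR();                 // thread setup; fails with an HRESULT
//       HRESULT hr = S_OK;
//       FrobArgs args = {pUnk, dwFlags, &hr};
//       Frob_CallBack(&args);
//       return hr;
//   }
// ---------------------------------------------------------------------------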
GetIDsOfNamesArgs* pArgs = (GetIDsOfNamesArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Dispatch_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames, pArgs->cNames, pArgs->lcid, pArgs->rgdispid); } else { AppDomainDoCallBack(pWrap, Dispatch_GetIDsOfNames_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Dispatch_GetIDsOfNames_Wrapper(IDispatch* pDisp, REFIID riid, _In_reads_(cNames) OLECHAR **rgszNames, unsigned int cNames, LCID lcid, DISPID *rgdispid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(rgszNames, NULL_OK)); PRECONDITION(CheckPointer(rgdispid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetIDsOfNamesArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr}; Dispatch_GetIDsOfNames_CallBack(&args); return hr; } VOID __stdcall InternalDispatchImpl_GetIDsOfNames_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetIDsOfNamesArgs* pArgs = (GetIDsOfNamesArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = InternalDispatchImpl_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames, pArgs->cNames, pArgs->lcid, pArgs->rgdispid); } else { AppDomainDoCallBack(pWrap, InternalDispatchImpl_GetIDsOfNames_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall InternalDispatchImpl_GetIDsOfNames_Wrapper(IDispatch* pDisp, REFIID riid, _In_reads_(cNames) OLECHAR **rgszNames, unsigned int cNames, LCID lcid, DISPID *rgdispid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(rgszNames, NULL_OK)); PRECONDITION(CheckPointer(rgdispid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetIDsOfNamesArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr}; InternalDispatchImpl_GetIDsOfNames_CallBack(&args); return hr; } struct InvokeArgs { IDispatch* pUnk; DISPID dispidMember; const IID* riid; LCID lcid; unsigned short wFlags; DISPPARAMS *pdispparams; VARIANT *pvarResult; EXCEPINFO *pexcepinfo; unsigned int *puArgErr; HRESULT* hr; }; VOID __stdcall Dispatch_Invoke_CallBack(LPVOID ptr) { CONTRACTL { THROWS; // Dispatch_Invoke can throw GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; InvokeArgs* pArgs = (InvokeArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Dispatch_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid, pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult, pArgs->pexcepinfo, pArgs->puArgErr); } else { AppDomainDoCallBack(pWrap, Dispatch_Invoke_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Dispatch_Invoke_Wrapper(IDispatch* pDisp, DISPID dispidMember, REFIID riid, LCID lcid, unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult, EXCEPINFO *pexcepinfo, unsigned int *puArgErr) { HRESULT hrRetVal = S_OK; SetupForComCallHR(); CONTRACTL { THROWS; // Dispatch_Invoke_CallBack can throw GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdispparams, NULL_OK)); PRECONDITION(CheckPointer(pvarResult, NULL_OK)); PRECONDITION(CheckPointer(pexcepinfo, NULL_OK)); PRECONDITION(CheckPointer(puArgErr, NULL_OK)); } CONTRACTL_END; InvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, 
puArgErr, &hrRetVal}; Dispatch_Invoke_CallBack(&args); return hrRetVal; } VOID __stdcall InternalDispatchImpl_Invoke_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; InvokeArgs* pArgs = (InvokeArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = InternalDispatchImpl_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid, pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult, pArgs->pexcepinfo, pArgs->puArgErr); } else { AppDomainDoCallBack(pWrap, InternalDispatchImpl_Invoke_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall InternalDispatchImpl_Invoke_Wrapper(IDispatch* pDisp, DISPID dispidMember, REFIID riid, LCID lcid, unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult, EXCEPINFO *pexcepinfo, unsigned int *puArgErr) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdispparams, NULL_OK)); PRECONDITION(CheckPointer(pvarResult, NULL_OK)); PRECONDITION(CheckPointer(pexcepinfo, NULL_OK)); PRECONDITION(CheckPointer(puArgErr, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; InvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr, &hr}; InternalDispatchImpl_Invoke_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IDispatchEx struct GetTypeInfoCountExArgs { IDispatchEx* pUnk; unsigned int *pctinfo; HRESULT* hr; }; VOID __stdcall DispatchEx_GetTypeInfoCount_CallBack (LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetTypeInfoCountExArgs* pArgs = (GetTypeInfoCountExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetTypeInfoCount(pArgs->pUnk, pArgs->pctinfo); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetTypeInfoCount_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetTypeInfoCount_Wrapper(IDispatchEx* pDisp, unsigned int *pctinfo) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pctinfo, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetTypeInfoCountExArgs args = {pDisp, pctinfo, &hr}; DispatchEx_GetTypeInfoCount_CallBack(&args); return hr; } struct GetTypeInfoExArgs { IDispatch* pUnk; unsigned int itinfo; LCID lcid; ITypeInfo **pptinfo; HRESULT* hr; }; VOID __stdcall DispatchEx_GetTypeInfo_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetTypeInfoExArgs* pArgs = (GetTypeInfoExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetTypeInfo(pArgs->pUnk, pArgs->itinfo, pArgs->lcid, pArgs->pptinfo); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetTypeInfo_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetTypeInfo_Wrapper(IDispatchEx* pDisp, unsigned int itinfo, LCID lcid, ITypeInfo **pptinfo) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pptinfo, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetTypeInfoExArgs args = {pDisp, itinfo, lcid, pptinfo, &hr}; DispatchEx_GetTypeInfo_CallBack(&args); return hr; } struct 
GetIDsOfNamesExArgs { IDispatchEx* pUnk; const IID* riid; OLECHAR **rgszNames; unsigned int cNames; LCID lcid; DISPID *rgdispid; HRESULT* hr; }; VOID __stdcall DispatchEx_GetIDsOfNames_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetIDsOfNamesExArgs* pArgs = (GetIDsOfNamesExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetIDsOfNames(pArgs->pUnk, *pArgs->riid, pArgs->rgszNames, pArgs->cNames, pArgs->lcid, pArgs->rgdispid); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetIDsOfNames_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetIDsOfNames_Wrapper(IDispatchEx* pDisp, REFIID riid, _In_reads_(cNames) OLECHAR **rgszNames, unsigned int cNames, LCID lcid, DISPID *rgdispid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(rgszNames, NULL_OK)); PRECONDITION(CheckPointer(rgdispid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetIDsOfNamesExArgs args = {pDisp, &riid, rgszNames, cNames, lcid, rgdispid, &hr}; DispatchEx_GetIDsOfNames_CallBack(&args); return hr; } struct DispExInvokeArgs { IDispatchEx* pUnk; DISPID dispidMember; const IID* riid; LCID lcid; unsigned short wFlags; DISPPARAMS *pdispparams; VARIANT *pvarResult; EXCEPINFO *pexcepinfo; unsigned int *puArgErr; HRESULT* hr; }; VOID __stdcall DispatchEx_Invoke_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DispExInvokeArgs* pArgs = (DispExInvokeArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_Invoke(pArgs->pUnk, pArgs->dispidMember, *pArgs->riid, pArgs->lcid, pArgs->wFlags, pArgs->pdispparams, pArgs->pvarResult, pArgs->pexcepinfo, pArgs->puArgErr); } else { AppDomainDoCallBack(pWrap, DispatchEx_Invoke_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_Invoke_Wrapper(IDispatchEx* pDisp, DISPID dispidMember, REFIID riid, LCID lcid, unsigned short wFlags, DISPPARAMS *pdispparams, VARIANT *pvarResult, EXCEPINFO *pexcepinfo, unsigned int *puArgErr) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdispparams, NULL_OK)); PRECONDITION(CheckPointer(pvarResult, NULL_OK)); PRECONDITION(CheckPointer(pexcepinfo, NULL_OK)); PRECONDITION(CheckPointer(puArgErr, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; DispExInvokeArgs args = {pDisp, dispidMember, &riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr, &hr}; DispatchEx_Invoke_CallBack(&args); return hr; } struct DeleteMemberByDispIDArgs { IDispatchEx* pDisp; DISPID id; HRESULT* hr; }; VOID __stdcall DispatchEx_DeleteMemberByDispID_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DeleteMemberByDispIDArgs* pArgs = (DeleteMemberByDispIDArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_DeleteMemberByDispID(pArgs->pDisp, pArgs->id); } else { AppDomainDoCallBack(pWrap, DispatchEx_DeleteMemberByDispID_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_DeleteMemberByDispID_Wrapper(IDispatchEx* pDisp, DISPID id) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); } 
CONTRACTL_END; HRESULT hr = S_OK; DeleteMemberByDispIDArgs args = {pDisp, id, &hr}; DispatchEx_DeleteMemberByDispID_CallBack(&args); return hr; } struct DeleteMemberByNameArgs { IDispatchEx* pDisp; BSTR bstrName; DWORD grfdex; HRESULT* hr; }; VOID __stdcall DispatchEx_DeleteMemberByName_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DeleteMemberByNameArgs* pArgs = (DeleteMemberByNameArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_DeleteMemberByName(pArgs->pDisp, pArgs->bstrName, pArgs->grfdex); } else { AppDomainDoCallBack(pWrap, DispatchEx_DeleteMemberByName_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_DeleteMemberByName_Wrapper(IDispatchEx* pDisp, BSTR bstrName, DWORD grfdex) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); } CONTRACTL_END; HRESULT hr = S_OK; DeleteMemberByNameArgs args = {pDisp, bstrName, grfdex, &hr}; DispatchEx_DeleteMemberByName_CallBack(&args); return hr; } struct GetMemberNameArgs { IDispatchEx* pDisp; DISPID id; BSTR *pbstrName; HRESULT* hr; }; VOID __stdcall DispatchEx_GetMemberName_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetMemberNameArgs* pArgs = (GetMemberNameArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetMemberName(pArgs->pDisp, pArgs->id, pArgs->pbstrName); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetMemberName_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetMemberName_Wrapper(IDispatchEx* pDisp, DISPID id, BSTR *pbstrName) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pbstrName, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetMemberNameArgs args = {pDisp, id, pbstrName, &hr}; DispatchEx_GetMemberName_CallBack(&args); return hr; } struct GetDispIDArgs { IDispatchEx* pDisp; BSTR bstrName; DWORD grfdex; DISPID *pid; HRESULT* hr; }; VOID __stdcall DispatchEx_GetDispID_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetDispIDArgs* pArgs = (GetDispIDArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetDispID(pArgs->pDisp, pArgs->bstrName, pArgs->grfdex, pArgs->pid); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetDispID_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetDispID_Wrapper(IDispatchEx* pDisp, BSTR bstrName, DWORD grfdex, DISPID *pid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetDispIDArgs args = {pDisp, bstrName, grfdex, pid, &hr}; DispatchEx_GetDispID_CallBack(&args); return hr; } struct GetMemberPropertiesArgs { IDispatchEx* pDisp; DISPID id; DWORD grfdexFetch; DWORD *pgrfdex; HRESULT* hr; }; VOID __stdcall DispatchEx_GetMemberProperties_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetMemberPropertiesArgs* pArgs = (GetMemberPropertiesArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { 
*(pArgs->hr) = DispatchEx_GetMemberProperties(pArgs->pDisp, pArgs->id, pArgs->grfdexFetch, pArgs->pgrfdex); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetMemberProperties_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetMemberProperties_Wrapper(IDispatchEx* pDisp, DISPID id, DWORD grfdexFetch, DWORD *pgrfdex) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pgrfdex, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetMemberPropertiesArgs args = {pDisp, id, grfdexFetch, pgrfdex, &hr}; DispatchEx_GetMemberProperties_CallBack(&args); return hr; } struct GetNameSpaceParentArgs { IDispatchEx* pDisp; IUnknown **ppunk; HRESULT* hr; }; VOID __stdcall DispatchEx_GetNameSpaceParent_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetNameSpaceParentArgs* pArgs = (GetNameSpaceParentArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetNameSpaceParent(pArgs->pDisp, pArgs->ppunk); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetNameSpaceParent_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetNameSpaceParent_Wrapper(IDispatchEx* pDisp, IUnknown **ppunk) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(ppunk, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetNameSpaceParentArgs args = {pDisp, ppunk, &hr}; DispatchEx_GetNameSpaceParent_CallBack(&args); return hr; } struct GetNextDispIDArgs { IDispatchEx* pDisp; DWORD grfdex; DISPID id; DISPID *pid; HRESULT* hr; }; VOID __stdcall DispatchEx_GetNextDispID_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetNextDispIDArgs* pArgs = (GetNextDispIDArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_GetNextDispID(pArgs->pDisp, pArgs->grfdex, pArgs->id, pArgs->pid); } else { AppDomainDoCallBack(pWrap, DispatchEx_GetNextDispID_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_GetNextDispID_Wrapper(IDispatchEx* pDisp, DWORD grfdex, DISPID id, DISPID *pid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetNextDispIDArgs args = {pDisp, grfdex, id, pid, &hr}; DispatchEx_GetNextDispID_CallBack(&args); return hr; } struct DispExInvokeExArgs { IDispatchEx* pDisp; DISPID id; LCID lcid; WORD wFlags; DISPPARAMS *pdp; VARIANT *pVarRes; EXCEPINFO *pei; IServiceProvider *pspCaller; HRESULT* hr; }; VOID __stdcall DispatchEx_InvokeEx_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DispExInvokeExArgs* pArgs = (DispExInvokeExArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pDisp); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = DispatchEx_InvokeEx(pArgs->pDisp, pArgs->id, pArgs->lcid, pArgs->wFlags, pArgs->pdp, pArgs->pVarRes, pArgs->pei, pArgs->pspCaller); } else { AppDomainDoCallBack(pWrap, DispatchEx_InvokeEx_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall DispatchEx_InvokeEx_Wrapper(IDispatchEx* pDisp, DISPID id, LCID lcid, WORD wFlags, DISPPARAMS *pdp, VARIANT *pVarRes, EXCEPINFO *pei, IServiceProvider *pspCaller) { 
SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pDisp)); PRECONDITION(CheckPointer(pdp, NULL_OK)); PRECONDITION(CheckPointer(pVarRes, NULL_OK)); PRECONDITION(CheckPointer(pei, NULL_OK)); PRECONDITION(CheckPointer(pspCaller, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; DispExInvokeExArgs args = {pDisp, id, lcid, wFlags, pdp, pVarRes, pei, pspCaller, &hr}; DispatchEx_InvokeEx_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IMarshal struct GetUnmarshalClassArgs { IMarshal* pUnk; const IID* riid; void * pv; ULONG dwDestContext; void * pvDestContext; ULONG mshlflags; LPCLSID pclsid; HRESULT* hr; }; VOID __stdcall Marshal_GetUnmarshalClass_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetUnmarshalClassArgs* pArgs = (GetUnmarshalClassArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_GetUnmarshalClass(pArgs->pUnk, *(pArgs->riid), pArgs->pv, pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags, pArgs->pclsid); } else { AppDomainDoCallBack(pWrap, Marshal_GetUnmarshalClass_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_GetUnmarshalClass_Wrapper(IMarshal* pMarsh, REFIID riid, void * pv, ULONG dwDestContext, void * pvDestContext, ULONG mshlflags, LPCLSID pclsid) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pv, NULL_OK)); PRECONDITION(CheckPointer(pvDestContext, NULL_OK)); PRECONDITION(CheckPointer(pclsid, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetUnmarshalClassArgs args = {pMarsh, &riid, pv, dwDestContext, pvDestContext, mshlflags, pclsid, &hr}; Marshal_GetUnmarshalClass_CallBack(&args); return hr; } struct GetMarshalSizeMaxArgs { IMarshal* pUnk; const IID* riid; void * pv; ULONG dwDestContext; void * pvDestContext; ULONG mshlflags; ULONG * pSize; HRESULT* hr; }; VOID __stdcall Marshal_GetMarshalSizeMax_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetMarshalSizeMaxArgs* pArgs = (GetMarshalSizeMaxArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_GetMarshalSizeMax(pArgs->pUnk, *(pArgs->riid), pArgs->pv, pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags, pArgs->pSize); } else { AppDomainDoCallBack(pWrap, Marshal_GetMarshalSizeMax_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_GetMarshalSizeMax_Wrapper(IMarshal* pMarsh, REFIID riid, void * pv, ULONG dwDestContext, void * pvDestContext, ULONG mshlflags, ULONG * pSize) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pv, NULL_OK)); PRECONDITION(CheckPointer(pvDestContext, NULL_OK)); PRECONDITION(CheckPointer(pSize, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetMarshalSizeMaxArgs args = {pMarsh, &riid, pv, dwDestContext, pvDestContext, mshlflags, pSize, &hr}; Marshal_GetMarshalSizeMax_CallBack(&args); return hr; } struct MarshalInterfaceArgs { IMarshal* pUnk; LPSTREAM pStm; const IID* riid; void * pv; ULONG dwDestContext; void * pvDestContext; ULONG mshlflags; HRESULT* hr; }; VOID __stdcall Marshal_MarshalInterface_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; 
PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; MarshalInterfaceArgs* pArgs = (MarshalInterfaceArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_MarshalInterface(pArgs->pUnk, pArgs->pStm, *(pArgs->riid), pArgs->pv, pArgs->dwDestContext, pArgs->pvDestContext, pArgs->mshlflags); } else { AppDomainDoCallBack(pWrap, Marshal_MarshalInterface_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_MarshalInterface_Wrapper(IMarshal* pMarsh, LPSTREAM pStm, REFIID riid, void * pv, ULONG dwDestContext, LPVOID pvDestContext, ULONG mshlflags) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pv, NULL_OK)); PRECONDITION(CheckPointer(pvDestContext, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; MarshalInterfaceArgs args = {pMarsh, pStm, &riid, pv, dwDestContext, pvDestContext, mshlflags, &hr}; Marshal_MarshalInterface_CallBack(&args); return hr; } struct UnmarshalInterfaceArgs { IMarshal* pUnk; LPSTREAM pStm; const IID* riid; void ** ppvObj; HRESULT* hr; }; VOID __stdcall Marshal_UnmarshalInterface_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; UnmarshalInterfaceArgs* pArgs = (UnmarshalInterfaceArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_UnmarshalInterface(pArgs->pUnk, pArgs->pStm, *(pArgs->riid), pArgs->ppvObj); } else { AppDomainDoCallBack(pWrap, Marshal_UnmarshalInterface_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_UnmarshalInterface_Wrapper(IMarshal* pMarsh, LPSTREAM pStm, REFIID riid, void ** ppvObj) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pStm, NULL_OK)); PRECONDITION(CheckPointer(ppvObj, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; UnmarshalInterfaceArgs args = {pMarsh, pStm, &riid, ppvObj, &hr}; Marshal_UnmarshalInterface_CallBack(&args); return hr; } struct ReleaseMarshalDataArgs { IMarshal* pUnk; LPSTREAM pStm; HRESULT* hr; }; VOID __stdcall Marshal_ReleaseMarshalData_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; ReleaseMarshalDataArgs* pArgs = (ReleaseMarshalDataArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_ReleaseMarshalData(pArgs->pUnk, pArgs->pStm); } else { AppDomainDoCallBack(pWrap, Marshal_ReleaseMarshalData_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_ReleaseMarshalData_Wrapper(IMarshal* pMarsh, LPSTREAM pStm) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); PRECONDITION(CheckPointer(pStm, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; ReleaseMarshalDataArgs args = {pMarsh, pStm, &hr}; Marshal_ReleaseMarshalData_CallBack(&args); return hr; } struct DisconnectObjectArgs { IMarshal* pUnk; ULONG dwReserved; HRESULT* hr; }; VOID __stdcall Marshal_DisconnectObject_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; DisconnectObjectArgs* pArgs = (DisconnectObjectArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = Marshal_DisconnectObject(pArgs->pUnk, pArgs->dwReserved); } else { 
AppDomainDoCallBack(pWrap, Marshal_DisconnectObject_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall Marshal_DisconnectObject_Wrapper(IMarshal* pMarsh, ULONG dwReserved) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pMarsh)); } CONTRACTL_END; HRESULT hr = S_OK; DisconnectObjectArgs args = {pMarsh, dwReserved, &hr}; Marshal_DisconnectObject_CallBack(&args); return hr; } // --------------------------------------------------------------------------- // Interface IConnectionPointContainer struct EnumConnectionPointsArgs { IUnknown* pUnk; IEnumConnectionPoints **ppEnum; HRESULT* hr; }; VOID __stdcall ConnectionPointContainer_EnumConnectionPoints_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; EnumConnectionPointsArgs* pArgs = (EnumConnectionPointsArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ConnectionPointContainer_EnumConnectionPoints(pArgs->pUnk, pArgs->ppEnum); } else { AppDomainDoCallBack(pWrap, ConnectionPointContainer_EnumConnectionPoints_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ConnectionPointContainer_EnumConnectionPoints_Wrapper(IUnknown* pUnk, IEnumConnectionPoints **ppEnum) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(ppEnum, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; EnumConnectionPointsArgs args = {pUnk, ppEnum, &hr}; ConnectionPointContainer_EnumConnectionPoints_CallBack(&args); return hr; } struct FindConnectionPointArgs { IUnknown* pUnk; const IID* riid; IConnectionPoint **ppCP; HRESULT* hr; }; VOID __stdcall ConnectionPointContainer_FindConnectionPoint_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; FindConnectionPointArgs* pArgs = (FindConnectionPointArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ConnectionPointContainer_FindConnectionPoint(pArgs->pUnk, *(pArgs->riid), pArgs->ppCP); } else { AppDomainDoCallBack(pWrap, ConnectionPointContainer_FindConnectionPoint_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ConnectionPointContainer_FindConnectionPoint_Wrapper(IUnknown* pUnk, REFIID riid, IConnectionPoint **ppCP) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(ppCP, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; FindConnectionPointArgs args = {pUnk, &riid, ppCP, &hr}; ConnectionPointContainer_FindConnectionPoint_CallBack(&args); return hr; } //------------------------------------------------------------------------------------------ // IObjectSafety methods for COM+ objects struct GetInterfaceSafetyArgs { IUnknown* pUnk; const IID* riid; DWORD *pdwSupportedOptions; DWORD *pdwEnabledOptions; HRESULT* hr; }; VOID __stdcall ObjectSafety_GetInterfaceSafetyOptions_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; GetInterfaceSafetyArgs* pArgs = (GetInterfaceSafetyArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ObjectSafety_GetInterfaceSafetyOptions(pArgs->pUnk, *(pArgs->riid), pArgs->pdwSupportedOptions, pArgs->pdwEnabledOptions); } else { AppDomainDoCallBack(pWrap, 
ObjectSafety_GetInterfaceSafetyOptions_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ObjectSafety_GetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk, REFIID riid, DWORD *pdwSupportedOptions, DWORD *pdwEnabledOptions) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); PRECONDITION(CheckPointer(pdwSupportedOptions, NULL_OK)); PRECONDITION(CheckPointer(pdwEnabledOptions, NULL_OK)); } CONTRACTL_END; HRESULT hr = S_OK; GetInterfaceSafetyArgs args = {pUnk, &riid, pdwSupportedOptions, pdwEnabledOptions, &hr}; ObjectSafety_GetInterfaceSafetyOptions_CallBack(&args); return hr; } struct SetInterfaceSafetyArgs { IUnknown* pUnk; const IID* riid; DWORD dwOptionSetMask; DWORD dwEnabledOptions; HRESULT* hr; }; VOID __stdcall ObjectSafety_SetInterfaceSafetyOptions_CallBack(LPVOID ptr) { CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(ptr)); } CONTRACTL_END; SetInterfaceSafetyArgs* pArgs = (SetInterfaceSafetyArgs*)ptr; ComCallWrapper* pWrap = MapIUnknownToWrapper(pArgs->pUnk); if (IsCurrentDomainValid(pWrap)) { *(pArgs->hr) = ObjectSafety_SetInterfaceSafetyOptions(pArgs->pUnk, *(pArgs->riid), pArgs->dwOptionSetMask, pArgs->dwEnabledOptions ); } else { AppDomainDoCallBack(pWrap, ObjectSafety_SetInterfaceSafetyOptions_CallBack, pArgs, pArgs->hr); } } HRESULT __stdcall ObjectSafety_SetInterfaceSafetyOptions_Wrapper(IUnknown* pUnk, REFIID riid, DWORD dwOptionSetMask, DWORD dwEnabledOptions) { SetupForComCallHR(); CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_PREEMPTIVE; PRECONDITION(CheckPointer(pUnk)); } CONTRACTL_END; HRESULT hr = S_OK; SetInterfaceSafetyArgs args = {pUnk, &riid, dwOptionSetMask, dwEnabledOptions, &hr}; ObjectSafety_SetInterfaceSafetyOptions_CallBack(&args); return hr; }
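Every wrapper in the file above follows one mechanical pattern: pack the arguments into a stack-allocated struct, then run a void*-based callback that either calls the real implementation (when IsCurrentDomainValid succeeds) or re-dispatches itself into the object's home domain via AppDomainDoCallBack. Below is a minimal, self-contained sketch of that shape; the Fake* helpers and GetValue method are hypothetical stand-ins for the runtime's internals, not actual CLR APIs.

```cpp
// Sketch only: models the args-struct + callback + wrapper shape used by
// the COM entry points above, with hypothetical stand-ins for the runtime.
#include <cstdio>

typedef void (*DomainCallback)(void* args);

static bool FakeIsCurrentDomainValid() { return true; }            // stand-in
static void FakeAppDomainDoCallBack(DomainCallback cb, void* args)  // stand-in
{
    cb(args); // a real implementation would transition domains first
}

static long Real_GetValue(int in, int* out) { *out = in * 2; return 0; }

struct GetValueArgs { int in; int* out; long* hr; };

static void GetValue_CallBack(void* ptr)
{
    GetValueArgs* pArgs = static_cast<GetValueArgs*>(ptr);
    if (FakeIsCurrentDomainValid())
        *(pArgs->hr) = Real_GetValue(pArgs->in, pArgs->out);
    else
        FakeAppDomainDoCallBack(GetValue_CallBack, pArgs); // re-enter in home domain
}

static long GetValue_Wrapper(int in, int* out)
{
    long hr = 0;                          // S_OK analogue
    GetValueArgs args = { in, out, &hr };
    GetValue_CallBack(&args);
    return hr;
}

int main()
{
    int result = 0;
    long hr = GetValue_Wrapper(21, &result);
    std::printf("hr=%ld result=%d\n", hr, result); // prints: hr=0 result=42
    return 0;
}
```

The struct-plus-callback indirection exists because the cross-domain dispatch mechanism accepts only a single void* payload, so every entry-point signature must be flattened into one pointer.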
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
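The three bail-out conditions in this PR description read naturally as a single eligibility predicate. The sketch below is an illustration only, not the JIT's actual fast-tail-call check; CalleeInfo and ArgDesc are hypothetical types invented for the example.

```cpp
// Illustration only: the ARM32 conditions from the PR text as one predicate.
// CalleeInfo and ArgDesc are hypothetical, not JIT data structures.
struct ArgDesc
{
    bool splitAcrossRegsAndStack; // struct passed partly in registers, partly on stack
};

struct CalleeInfo
{
    const ArgDesc* args;
    int argCount;
    bool usesNonStandardCallingConvention;
    bool overwritesIncomingStackArgs; // would clobber caller stack slots still needed
};

static bool CanUseFastTailCall(const CalleeInfo& callee)
{
    for (int i = 0; i < callee.argCount; i++)
    {
        if (callee.args[i].splitAcrossRegsAndStack)
            return false; // condition 1: split struct argument
    }
    if (callee.usesNonStandardCallingConvention)
        return false;     // condition 2: non-standard calling convention
    if (callee.overwritesIncomingStackArgs)
        return false;     // condition 3: would overwrite stack passed to the callee
    return true;
}
```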
./src/coreclr/pal/tests/palsuite/c_runtime/sprintf_s/test8/test8.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test8.c ** ** Purpose: Test #8 for the sprintf_s function. Tests the decimal ** specifier (%d). ** ** **==========================================================================*/ #include <palsuite.h> #include "../sprintf_s.h" /* * Depends on memcmp and strlen */ PALTEST(c_runtime_sprintf_s_test8_paltest_sprintf_test8, "c_runtime/sprintf_s/test8/paltest_sprintf_test8") { int neg = -42; int pos = 42; INT64 l = 42; if (PAL_Initialize(argc, argv) != 0) { return FAIL; } DoNumTest("foo %d", pos, "foo 42"); DoNumTest("foo %ld", 0xFFFF, "foo 65535"); DoNumTest("foo %hd", 0xFFFF, "foo -1"); DoNumTest("foo %Ld", pos, "foo 42"); DoI64Test("foo %I64d", l, "42", "foo 42"); DoNumTest("foo %3d", pos, "foo 42"); DoNumTest("foo %-3d", pos, "foo 42 "); DoNumTest("foo %.1d", pos, "foo 42"); DoNumTest("foo %.3d", pos, "foo 042"); DoNumTest("foo %03d", pos, "foo 042"); DoNumTest("foo %#d", pos, "foo 42"); DoNumTest("foo %+d", pos, "foo +42"); DoNumTest("foo % d", pos, "foo 42"); DoNumTest("foo %+d", neg, "foo -42"); DoNumTest("foo % d", neg, "foo -42"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test8.c ** ** Purpose: Test #8 for the sprintf_s function. Tests the decimal ** specifier (%d). ** ** **==========================================================================*/ #include <palsuite.h> #include "../sprintf_s.h" /* * Depends on memcmp and strlen */ PALTEST(c_runtime_sprintf_s_test8_paltest_sprintf_test8, "c_runtime/sprintf_s/test8/paltest_sprintf_test8") { int neg = -42; int pos = 42; INT64 l = 42; if (PAL_Initialize(argc, argv) != 0) { return FAIL; } DoNumTest("foo %d", pos, "foo 42"); DoNumTest("foo %ld", 0xFFFF, "foo 65535"); DoNumTest("foo %hd", 0xFFFF, "foo -1"); DoNumTest("foo %Ld", pos, "foo 42"); DoI64Test("foo %I64d", l, "42", "foo 42"); DoNumTest("foo %3d", pos, "foo 42"); DoNumTest("foo %-3d", pos, "foo 42 "); DoNumTest("foo %.1d", pos, "foo 42"); DoNumTest("foo %.3d", pos, "foo 042"); DoNumTest("foo %03d", pos, "foo 042"); DoNumTest("foo %#d", pos, "foo 42"); DoNumTest("foo %+d", pos, "foo +42"); DoNumTest("foo % d", pos, "foo 42"); DoNumTest("foo %+d", neg, "foo -42"); DoNumTest("foo % d", neg, "foo -42"); PAL_Terminate(); return PASS; }
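DoNumTest and DoI64Test come from ../sprintf_s.h and are not shown in this record. A plausible self-contained reduction of the check they perform (format the value, then compare byte-for-byte against the expected string) is sketched below, with snprintf standing in for sprintf_s on platforms without Annex K.

```cpp
// Sketch under stated assumptions: reproduces the shape of DoNumTest,
// whose real definition lives in ../sprintf_s.h and is not shown above.
#include <cstdio>
#include <cstring>

static bool CheckNumFormat(const char* fmt, int value, const char* expected)
{
    char buffer[256];
    std::snprintf(buffer, sizeof(buffer), fmt, value); // sprintf_s stand-in
    return std::strcmp(buffer, expected) == 0;         // byte-for-byte compare
}

int main()
{
    // Mirrors two cases from the test: '+' forces a sign, '0' pads with zeros.
    std::printf("%s\n", CheckNumFormat("foo %+d", 42, "foo +42") ? "PASS" : "FAIL");
    std::printf("%s\n", CheckNumFormat("foo %03d", 42, "foo 042") ? "PASS" : "FAIL");
    return 0;
}
```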
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
./src/tests/Interop/PInvoke/IEnumerator/IEnumeratorNative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "IEnumeratorNative.h" #include <xplatform.h> extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE GetIntegerEnumerator(int start, int count, IEnumVARIANT** ppEnum) { if (count < 0) { return E_INVALIDARG; } *ppEnum = new IntegerEnumerator(start, count); return S_OK; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE VerifyIntegerEnumerator(IEnumVARIANT* pEnum, int start, int count) { if (count < 0) { return E_INVALIDARG; } HRESULT hr = S_OK; VARIANT element; VariantInit(&element); /* initialize so the first VariantClear below does not read garbage */ ULONG numFetched; for(int i = start; i < start + count; ++i) { VariantClear(&element); hr = pEnum->Next(1, &element, &numFetched); if(FAILED(hr) || numFetched != 1) { return hr; } if (V_I4(&element) != i) { return E_UNEXPECTED; } } hr = pEnum->Next(1, &element, &numFetched); if (hr != S_FALSE || numFetched != 0) { return E_UNEXPECTED; } return S_OK; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE GetIntegerEnumeration(int start, int count, IDispatch** ppDisp) { if (count < 0) { return E_INVALIDARG; } *ppDisp = new IntegerEnumerable(start, count); return S_OK; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE VerifyIntegerEnumeration(IDispatch* pDisp, int start, int count) { DISPPARAMS params{}; VARIANT result; HRESULT hr = pDisp->Invoke( DISPID_NEWENUM, IID_NULL, LOCALE_USER_DEFAULT, DISPATCH_METHOD | DISPATCH_PROPERTYGET, &params, &result, NULL, NULL ); if (FAILED(hr)) { return hr; } if(!((V_VT(&result) == VT_UNKNOWN) || (V_VT(&result) == VT_DISPATCH))) { return E_UNEXPECTED; } IEnumVARIANT* pEnum; hr = V_UNKNOWN(&result)->QueryInterface<IEnumVARIANT>(&pEnum); if (FAILED(hr)) { return hr; } hr = VerifyIntegerEnumerator(pEnum, start, count); pEnum->Release(); return hr; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE PassThroughEnumerator(IEnumVARIANT* in, IEnumVARIANT** out) { return in->QueryInterface(out); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "IEnumeratorNative.h" #include <xplatform.h> extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE GetIntegerEnumerator(int start, int count, IEnumVARIANT** ppEnum) { if (count < 0) { return E_INVALIDARG; } *ppEnum = new IntegerEnumerator(start, count); return S_OK; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE VerifyIntegerEnumerator(IEnumVARIANT* pEnum, int start, int count) { if (count < 0) { return E_INVALIDARG; } HRESULT hr = S_OK; VARIANT element; VariantInit(&element); /* initialize so the first VariantClear below does not read garbage */ ULONG numFetched; for(int i = start; i < start + count; ++i) { VariantClear(&element); hr = pEnum->Next(1, &element, &numFetched); if(FAILED(hr) || numFetched != 1) { return hr; } if (V_I4(&element) != i) { return E_UNEXPECTED; } } hr = pEnum->Next(1, &element, &numFetched); if (hr != S_FALSE || numFetched != 0) { return E_UNEXPECTED; } return S_OK; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE GetIntegerEnumeration(int start, int count, IDispatch** ppDisp) { if (count < 0) { return E_INVALIDARG; } *ppDisp = new IntegerEnumerable(start, count); return S_OK; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE VerifyIntegerEnumeration(IDispatch* pDisp, int start, int count) { DISPPARAMS params{}; VARIANT result; HRESULT hr = pDisp->Invoke( DISPID_NEWENUM, IID_NULL, LOCALE_USER_DEFAULT, DISPATCH_METHOD | DISPATCH_PROPERTYGET, &params, &result, NULL, NULL ); if (FAILED(hr)) { return hr; } if(!((V_VT(&result) == VT_UNKNOWN) || (V_VT(&result) == VT_DISPATCH))) { return E_UNEXPECTED; } IEnumVARIANT* pEnum; hr = V_UNKNOWN(&result)->QueryInterface<IEnumVARIANT>(&pEnum); if (FAILED(hr)) { return hr; } hr = VerifyIntegerEnumerator(pEnum, start, count); pEnum->Release(); return hr; } extern "C" DLL_EXPORT HRESULT STDMETHODCALLTYPE PassThroughEnumerator(IEnumVARIANT* in, IEnumVARIANT** out) { return in->QueryInterface(out); }
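A caller-side sketch of how a native client could drive the exports above follows. This usage is assumed rather than taken from the test's managed side, and it links directly against the exported functions (Windows/COM only) instead of resolving them with GetProcAddress.

```cpp
// Hedged sketch: round-trips the GetIntegerEnumerator/VerifyIntegerEnumerator
// exports defined above. Assumes the test library is linked in.
#include <windows.h>
#include <oaidl.h>
#include <cstdio>

extern "C" HRESULT STDMETHODCALLTYPE GetIntegerEnumerator(int start, int count, IEnumVARIANT** ppEnum);
extern "C" HRESULT STDMETHODCALLTYPE VerifyIntegerEnumerator(IEnumVARIANT* pEnum, int start, int count);

int RunEnumeratorRoundTrip()
{
    IEnumVARIANT* pEnum = nullptr;
    HRESULT hr = GetIntegerEnumerator(5, 3, &pEnum); // enumerates 5, 6, 7
    if (FAILED(hr))
        return 1;
    hr = VerifyIntegerEnumerator(pEnum, 5, 3);       // Next() three times, then S_FALSE
    pEnum->Release();                                // balance the construction ref
    std::printf("round trip %s\n", SUCCEEDED(hr) ? "ok" : "failed");
    return SUCCEEDED(hr) ? 0 : 1;
}
```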
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/pal_specific/PAL_RegisterLibraryW_UnregisterLibraryW/test2_neg/reg_unreg_libraryw_neg.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: pal_registerlibraryw_unregisterlibraryw_neg.c ** ** Purpose: Negatively test the PAL_RegisterLibrary API. ** Call PAL_RegisterLibrary to map a non-existent module ** into the calling process address space. ** ** **============================================================*/ #define UNICODE #include <palsuite.h> PALTEST(pal_specific_PAL_RegisterLibraryW_UnregisterLibraryW_test2_neg_paltest_reg_unreg_libraryw_neg, "pal_specific/PAL_RegisterLibraryW_UnregisterLibraryW/test2_neg/paltest_reg_unreg_libraryw_neg") { HMODULE ModuleHandle; char ModuleName[64]; WCHAR *wpModuleName = NULL; int err; /*Initialize the PAL environment*/ err = PAL_Initialize(argc, argv); if(0 != err) { return FAIL; } memset(ModuleName, 0, 64); sprintf_s(ModuleName, ARRAY_SIZE(ModuleName), "%s", "not_exist_module_name"); /*convert a normal string to a wide one*/ wpModuleName = convert(ModuleName); /*try to load a module that does not exist*/ ModuleHandle = PAL_RegisterLibrary(wpModuleName); /*free the memory*/ free(wpModuleName); if(NULL != ModuleHandle) { Trace("ERROR: PAL_RegisterLibrary successfully mapped " "a module that does not exist into the calling process\n"); /*decrement the reference count of the loaded DLL*/ err = PAL_UnregisterLibrary(ModuleHandle); if(0 == err) { Trace("\nFailed to call PAL_UnregisterLibrary API to decrement the " "count of the loaded DLL module!\n"); } Fail(""); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: pal_registerlibraryw_unregisterlibraryw_neg.c ** ** Purpose: Negatively test the PAL_RegisterLibrary API. ** Call PAL_RegisterLibrary to map a non-existent module ** into the calling process address space. ** ** **============================================================*/ #define UNICODE #include <palsuite.h> PALTEST(pal_specific_PAL_RegisterLibraryW_UnregisterLibraryW_test2_neg_paltest_reg_unreg_libraryw_neg, "pal_specific/PAL_RegisterLibraryW_UnregisterLibraryW/test2_neg/paltest_reg_unreg_libraryw_neg") { HMODULE ModuleHandle; char ModuleName[64]; WCHAR *wpModuleName = NULL; int err; /*Initialize the PAL environment*/ err = PAL_Initialize(argc, argv); if(0 != err) { return FAIL; } memset(ModuleName, 0, 64); sprintf_s(ModuleName, ARRAY_SIZE(ModuleName), "%s", "not_exist_module_name"); /*convert a normal string to a wide one*/ wpModuleName = convert(ModuleName); /*try to load a module that does not exist*/ ModuleHandle = PAL_RegisterLibrary(wpModuleName); /*free the memory*/ free(wpModuleName); if(NULL != ModuleHandle) { Trace("ERROR: PAL_RegisterLibrary successfully mapped " "a module that does not exist into the calling process\n"); /*decrement the reference count of the loaded DLL*/ err = PAL_UnregisterLibrary(ModuleHandle); if(0 == err) { Trace("\nFailed to call PAL_UnregisterLibrary API to decrement the " "count of the loaded DLL module!\n"); } Fail(""); } PAL_Terminate(); return PASS; }
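For contrast with this negative test, the positive path would pass the name of a module that does exist. A minimal sketch under that assumption is below; the caller supplies a real module name, and convert/PASS/FAIL are the palsuite helpers already used above.

```cpp
// Sketch only: the success path that the negative test above complements.
// The module name passed by the caller is assumed to exist on disk.
#include <palsuite.h>

static int TryRegisterExisting(char* moduleName)
{
    WCHAR* wpName = convert(moduleName);          // palsuite helper, heap-allocates
    HMODULE handle = PAL_RegisterLibrary(wpName);
    free(wpName);
    if (NULL == handle)
    {
        return FAIL;                              // a real module should map
    }
    /* drop the reference we just took */
    return (0 != PAL_UnregisterLibrary(handle)) ? PASS : FAIL;
}
```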
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
./src/native/eventpipe/ep-event-instance.h
#ifndef __EVENTPIPE_EVENT_INSTANCE_H__ #define __EVENTPIPE_EVENT_INSTANCE_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #include "ep-stack-contents.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_EVENT_INSTANCE_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeEventInstance. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeEventInstance { #else struct _EventPipeEventInstance_Internal { #endif uint8_t activity_id [EP_ACTIVITY_ID_SIZE]; uint8_t related_activity_id [EP_ACTIVITY_ID_SIZE]; uint64_t thread_id; ep_timestamp_t timestamp; EventPipeEvent *ep_event; const uint8_t *data; uint32_t metadata_id; uint32_t proc_num; uint32_t data_len; // TODO: Look at optimizing this when writing into buffer manager. // Only write up to next available frame to better utilize memory. // Even events not requesting a stack will still waste space in buffer manager. // Needs to go last since number of frames will set size in stream. EventPipeStackContents stack_contents; #ifdef EP_CHECKED_BUILD uint32_t debug_event_start; uint32_t debug_event_end; #endif }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeEventInstance { uint8_t _internal [sizeof (struct _EventPipeEventInstance_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, EventPipeEvent *, ep_event) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint32_t, metadata_id) EP_DEFINE_SETTER(EventPipeEventInstance *, event_instance, uint32_t, metadata_id) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint32_t, proc_num) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint64_t, thread_id) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, ep_timestamp_t, timestamp) EP_DEFINE_SETTER(EventPipeEventInstance *, event_instance, ep_timestamp_t, timestamp) EP_DEFINE_GETTER_ARRAY_REF(EventPipeEventInstance *, event_instance, uint8_t *, const uint8_t *, activity_id, activity_id[0]) EP_DEFINE_GETTER_ARRAY_REF(EventPipeEventInstance *, event_instance, uint8_t *, const uint8_t *, related_activity_id, related_activity_id[0]) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, const uint8_t *, data) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint32_t, data_len) EP_DEFINE_GETTER_REF(EventPipeEventInstance *, event_instance, EventPipeStackContents *, stack_contents) EventPipeEventInstance * ep_event_instance_alloc ( EventPipeEvent *ep_event, uint32_t proc_num, uint64_t thread_id, const uint8_t *data, uint32_t data_len, const uint8_t *activity_id, const uint8_t *related_activity_id); EventPipeEventInstance * ep_event_instance_init ( EventPipeEventInstance *ep_event_instance, EventPipeEvent *ep_event, uint32_t proc_num, uint64_t thread_id, const uint8_t *data, uint32_t data_len, const uint8_t *activity_id, const uint8_t *related_activity_id); void ep_event_instance_fini (EventPipeEventInstance *ep_event_instance); void ep_event_instance_free (EventPipeEventInstance *ep_event_instance); bool ep_event_instance_ensure_consistency (const EventPipeEventInstance *ep_event_instance); uint32_t ep_event_instance_get_aligned_total_size ( const EventPipeEventInstance *ep_event_instance, EventPipeSerializationFormat format); void ep_event_instance_serialize_to_json_file ( EventPipeEventInstance *ep_event_instance, EventPipeJsonFile *json_file); /* * EventPipeSequencePoint. 
*/ // A point in time marker that is used as a boundary when emitting events. // The events in a Nettrace file are not emitted in a fully sorted order // but we do guarantee that all events before a sequence point are emitted // prior to any events after the sequence point. #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeSequencePoint { #else struct _EventPipeSequencePoint_Internal { #endif ep_rt_thread_sequence_number_hash_map_t thread_sequence_numbers; ep_timestamp_t timestamp; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeSequencePoint { uint8_t _internal [sizeof (struct _EventPipeSequencePoint_Internal)]; }; #endif EP_DEFINE_GETTER_REF(EventPipeSequencePoint *, sequence_point, ep_rt_thread_sequence_number_hash_map_t *, thread_sequence_numbers) EP_DEFINE_GETTER(EventPipeSequencePoint *, sequence_point, ep_timestamp_t, timestamp) EP_DEFINE_SETTER(EventPipeSequencePoint *, sequence_point, ep_timestamp_t, timestamp) EventPipeSequencePoint * ep_sequence_point_alloc (void); EventPipeSequencePoint * ep_sequence_point_init (EventPipeSequencePoint *sequence_point); void ep_sequence_point_fini (EventPipeSequencePoint *sequence_point); void ep_sequence_point_free (EventPipeSequencePoint *sequence_point); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_EVENT_INSTANCE_H__ */
#ifndef __EVENTPIPE_EVENT_INSTANCE_H__ #define __EVENTPIPE_EVENT_INSTANCE_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" #include "ep-stack-contents.h" #undef EP_IMPL_GETTER_SETTER #ifdef EP_IMPL_EVENT_INSTANCE_GETTER_SETTER #define EP_IMPL_GETTER_SETTER #endif #include "ep-getter-setter.h" /* * EventPipeEventInstance. */ #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeEventInstance { #else struct _EventPipeEventInstance_Internal { #endif uint8_t activity_id [EP_ACTIVITY_ID_SIZE]; uint8_t related_activity_id [EP_ACTIVITY_ID_SIZE]; uint64_t thread_id; ep_timestamp_t timestamp; EventPipeEvent *ep_event; const uint8_t *data; uint32_t metadata_id; uint32_t proc_num; uint32_t data_len; // TODO: Look at optimizing this when writing into buffer manager. // Only write up to next available frame to better utilize memory. // Even events not requesting a stack will still waste space in buffer manager. // Needs to go last since number of frames will set size in stream. EventPipeStackContents stack_contents; #ifdef EP_CHECKED_BUILD uint32_t debug_event_start; uint32_t debug_event_end; #endif }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeEventInstance { uint8_t _internal [sizeof (struct _EventPipeEventInstance_Internal)]; }; #endif EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, EventPipeEvent *, ep_event) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint32_t, metadata_id) EP_DEFINE_SETTER(EventPipeEventInstance *, event_instance, uint32_t, metadata_id) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint32_t, proc_num) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint64_t, thread_id) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, ep_timestamp_t, timestamp) EP_DEFINE_SETTER(EventPipeEventInstance *, event_instance, ep_timestamp_t, timestamp) EP_DEFINE_GETTER_ARRAY_REF(EventPipeEventInstance *, event_instance, uint8_t *, const uint8_t *, activity_id, activity_id[0]) EP_DEFINE_GETTER_ARRAY_REF(EventPipeEventInstance *, event_instance, uint8_t *, const uint8_t *, related_activity_id, related_activity_id[0]) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, const uint8_t *, data) EP_DEFINE_GETTER(EventPipeEventInstance *, event_instance, uint32_t, data_len) EP_DEFINE_GETTER_REF(EventPipeEventInstance *, event_instance, EventPipeStackContents *, stack_contents) EventPipeEventInstance * ep_event_instance_alloc ( EventPipeEvent *ep_event, uint32_t proc_num, uint64_t thread_id, const uint8_t *data, uint32_t data_len, const uint8_t *activity_id, const uint8_t *related_activity_id); EventPipeEventInstance * ep_event_instance_init ( EventPipeEventInstance *ep_event_instance, EventPipeEvent *ep_event, uint32_t proc_num, uint64_t thread_id, const uint8_t *data, uint32_t data_len, const uint8_t *activity_id, const uint8_t *related_activity_id); void ep_event_instance_fini (EventPipeEventInstance *ep_event_instance); void ep_event_instance_free (EventPipeEventInstance *ep_event_instance); bool ep_event_instance_ensure_consistency (const EventPipeEventInstance *ep_event_instance); uint32_t ep_event_instance_get_aligned_total_size ( const EventPipeEventInstance *ep_event_instance, EventPipeSerializationFormat format); void ep_event_instance_serialize_to_json_file ( EventPipeEventInstance *ep_event_instance, EventPipeJsonFile *json_file); /* * EventPipeSequencePoint. 
*/ // A point in time marker that is used as a boundary when emitting events. // The events in a Nettrace file are not emitted in a fully sorted order // but we do guarantee that all events before a sequence point are emitted // prior to any events after the sequence point. #if defined(EP_INLINE_GETTER_SETTER) || defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeSequencePoint { #else struct _EventPipeSequencePoint_Internal { #endif ep_rt_thread_sequence_number_hash_map_t thread_sequence_numbers; ep_timestamp_t timestamp; }; #if !defined(EP_INLINE_GETTER_SETTER) && !defined(EP_IMPL_EVENT_INSTANCE_GETTER_SETTER) struct _EventPipeSequencePoint { uint8_t _internal [sizeof (struct _EventPipeSequencePoint_Internal)]; }; #endif EP_DEFINE_GETTER_REF(EventPipeSequencePoint *, sequence_point, ep_rt_thread_sequence_number_hash_map_t *, thread_sequence_numbers) EP_DEFINE_GETTER(EventPipeSequencePoint *, sequence_point, ep_timestamp_t, timestamp) EP_DEFINE_SETTER(EventPipeSequencePoint *, sequence_point, ep_timestamp_t, timestamp) EventPipeSequencePoint * ep_sequence_point_alloc (void); EventPipeSequencePoint * ep_sequence_point_init (EventPipeSequencePoint *sequence_point); void ep_sequence_point_fini (EventPipeSequencePoint *sequence_point); void ep_sequence_point_free (EventPipeSequencePoint *sequence_point); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_EVENT_INSTANCE_H__ */
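The EP_DEFINE_GETTER/SETTER macros in this header generate accessors named ep_event_instance_get_<field>, the convention used throughout eventpipe. Assuming that expansion, a minimal consumer of the header could look like the following sketch; the EventPipeEvent pointer and payload are placeholders, since a real caller would hold a live event from session configuration.

```cpp
// Usage sketch built only from signatures in this header; hedged, not the
// runtime's actual call site. Style follows the header's C conventions.
#include "ep-event-instance.h"

static void inspect_instance (EventPipeEvent *ep_event)
{
    const uint8_t payload [] = { 1, 2, 3, 4 };
    EventPipeEventInstance *inst = ep_event_instance_alloc (
        ep_event,
        0,                           // proc_num
        42,                          // thread_id
        payload,
        (uint32_t)sizeof (payload),
        NULL,                        // activity_id
        NULL);                       // related_activity_id
    if (!inst)
        return;
    // Accessors generated by EP_DEFINE_GETTER(..., data_len) et al.
    uint32_t len = ep_event_instance_get_data_len (inst);
    uint64_t tid = ep_event_instance_get_thread_id (inst);
    (void)len; (void)tid;
    ep_event_instance_free (inst);
}
```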
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/pal/src/libunwind/src/ia64/unwind_decoder.h
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2002 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* * Generic IA-64 unwind info decoder. * * This file is used both by the Linux kernel and objdump. Please keep * the two copies of this file in sync. * * You need to customize the decoder by defining the following * macros/constants before including this file: * * Types: * unw_word Unsigned integer type with at least 64 bits * * Register names: * UNW_REG_BSP * UNW_REG_BSPSTORE * UNW_REG_FPSR * UNW_REG_LC * UNW_REG_PFS * UNW_REG_PR * UNW_REG_RNAT * UNW_REG_PSP * UNW_REG_RP * UNW_REG_UNAT * * Decoder action macros: * UNW_DEC_BAD_CODE(code) * UNW_DEC_ABI(fmt,abi,context,arg) * UNW_DEC_BR_GR(fmt,brmask,gr,arg) * UNW_DEC_BR_MEM(fmt,brmask,arg) * UNW_DEC_COPY_STATE(fmt,label,arg) * UNW_DEC_EPILOGUE(fmt,t,ecount,arg) * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg) * UNW_DEC_FR_MEM(fmt,frmask,arg) * UNW_DEC_GR_GR(fmt,grmask,gr,arg) * UNW_DEC_GR_MEM(fmt,grmask,arg) * UNW_DEC_LABEL_STATE(fmt,label,arg) * UNW_DEC_MEM_STACK_F(fmt,t,size,arg) * UNW_DEC_MEM_STACK_V(fmt,t,arg) * UNW_DEC_PRIUNAT_GR(fmt,r,arg) * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg) * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg) * UNW_DEC_PROLOGUE(fmt,body,rlen,arg) * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg) * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg) * UNW_DEC_REG_REG(fmt,src,dst,arg) * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg) * UNW_DEC_REG_WHEN(fmt,reg,t,arg) * UNW_DEC_RESTORE(fmt,t,abreg,arg) * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg) * UNW_DEC_SPILL_BASE(fmt,pspoff,arg) * UNW_DEC_SPILL_MASK(fmt,imaskp,arg) * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg) * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg) * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg) * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg) */ static unw_word unw_decode_uleb128 (unsigned char **dpp) { unsigned shift = 0; unw_word byte, result = 0; unsigned char *bp = *dpp; while (1) { byte = *bp++; result |= (byte & 0x7f) << shift; if ((byte & 0x80) == 0) break; shift += 7; } *dpp = bp; return result; } static unsigned char * unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, abreg; unw_word t, off; byte1 = *dp++; t = unw_decode_uleb128 (&dp); off = unw_decode_uleb128 (&dp); abreg = (byte1 & 0x7f); 
if (byte1 & 0x80) UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg); else UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg); return dp; } static unsigned char * unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, abreg, x, ytreg; unw_word t; byte1 = *dp++; byte2 = *dp++; t = unw_decode_uleb128 (&dp); abreg = (byte1 & 0x7f); ytreg = byte2; x = (byte1 >> 7) & 1; if ((byte1 & 0x80) == 0 && ytreg == 0) UNW_DEC_RESTORE(X2, t, abreg, arg); else UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg); return dp; } static unsigned char * unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, abreg, qp; unw_word t, off; byte1 = *dp++; byte2 = *dp++; t = unw_decode_uleb128 (&dp); off = unw_decode_uleb128 (&dp); qp = (byte1 & 0x3f); abreg = (byte2 & 0x7f); if (byte1 & 0x80) UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); else UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); return dp; } static unsigned char * unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg; unw_word t; byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; t = unw_decode_uleb128 (&dp); qp = (byte1 & 0x3f); abreg = (byte2 & 0x7f); x = (byte2 >> 7) & 1; ytreg = byte3; if ((byte2 & 0x80) == 0 && byte3 == 0) UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); else UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); return dp; } static inline unsigned char * unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg) { int body = (code & 0x20) != 0; unw_word rlen; rlen = (code & 0x1f); UNW_DEC_PROLOGUE(R1, body, rlen, arg); return dp; } static inline unsigned char * unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, mask, grsave; unw_word rlen; byte1 = *dp++; mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); grsave = (byte1 & 0x7f); rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg); return dp; } static inline unsigned char * unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg) { unw_word rlen; rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg); return dp; } static inline unsigned char * unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg) { unsigned char brmask = (code & 0x1f); UNW_DEC_BR_MEM(P1, brmask, arg); return dp; } static inline unsigned char * unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg) { if ((code & 0x10) == 0) { unsigned char byte1 = *dp++; UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1), (byte1 & 0x7f), arg); } else if ((code & 0x08) == 0) { unsigned char byte1 = *dp++, r, dst; r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); dst = (byte1 & 0x7f); switch (r) { case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break; case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break; case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break; case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break; case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break; case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break; case 6: UNW_DEC_RP_BR(P3, dst, arg); break; case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break; case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break; case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break; case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break; case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else if ((code & 0x7) == 0) UNW_DEC_SPILL_MASK(P4, dp, arg); else if ((code & 
0x7) == 1) { unw_word grmask, frmask, byte1, byte2, byte3; byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; grmask = ((byte1 >> 4) & 0xf); frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3; UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg); } else UNW_DEC_BAD_CODE(code); return dp; } static inline unsigned char * unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg) { int gregs = (code & 0x10) != 0; unsigned char mask = (code & 0x0f); if (gregs) UNW_DEC_GR_MEM(P6, mask, arg); else UNW_DEC_FR_MEM(P6, mask, arg); return dp; } static inline unsigned char * unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg) { unsigned char r, byte1, byte2; unw_word t, size; if ((code & 0x10) == 0) { r = (code & 0xf); t = unw_decode_uleb128 (&dp); switch (r) { case 0: size = unw_decode_uleb128 (&dp); UNW_DEC_MEM_STACK_F(P7, t, size, arg); break; case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break; case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break; case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break; case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break; case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break; case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break; case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break; case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break; case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break; case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break; case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break; case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break; case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break; case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else { switch (code & 0xf) { case 0x0: /* p8 */ { r = *dp++; t = unw_decode_uleb128 (&dp); switch (r) { case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break; case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break; case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break; case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break; case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break; case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break; case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break; case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break; case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break; case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break; case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break; case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break; case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break; case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break; case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break; case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } break; case 0x1: byte1 = *dp++; byte2 = *dp++; UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg); break; case 0xf: /* p10 */ byte1 = *dp++; byte2 = *dp++; UNW_DEC_ABI(P10, byte1, byte2, arg); break; case 0x9: return unw_decode_x1 (dp, code, arg); case 0xa: return unw_decode_x2 (dp, code, arg); case 0xb: return unw_decode_x3 (dp, code, arg); case 0xc: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } } return dp; } static inline unsigned char * unw_decode_b1 (unsigned char *dp, unsigned 
char code, void *arg) { unw_word label = (code & 0x1f); if ((code & 0x20) != 0) UNW_DEC_COPY_STATE(B1, label, arg); else UNW_DEC_LABEL_STATE(B1, label, arg); return dp; } static inline unsigned char * unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg) { unw_word t; t = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg); return dp; } static inline unsigned char * unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg) { unw_word t, ecount, label; if ((code & 0x10) == 0) { t = unw_decode_uleb128 (&dp); ecount = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B3, t, ecount, arg); } else if ((code & 0x07) == 0) { label = unw_decode_uleb128 (&dp); if ((code & 0x08) != 0) UNW_DEC_COPY_STATE(B4, label, arg); else UNW_DEC_LABEL_STATE(B4, label, arg); } else switch (code & 0x7) { case 1: return unw_decode_x1 (dp, code, arg); case 2: return unw_decode_x2 (dp, code, arg); case 3: return unw_decode_x3 (dp, code, arg); case 4: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } return dp; } typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *); /* * Decode one descriptor and return address of next descriptor. */ static inline unsigned char * unw_decode (unsigned char *dp, int inside_body, void *arg) { unsigned char code, primary; code = *dp++; primary = code >> 5; if (primary < 2) dp = unw_decode_r1 (dp, code, arg); else if (primary == 2) dp = unw_decode_r2 (dp, code, arg); else if (primary == 3) dp = unw_decode_r3 (dp, code, arg); else if (inside_body) switch (primary) { case 4: case 5: dp = unw_decode_b1 (dp, code, arg); break; case 6: dp = unw_decode_b2 (dp, code, arg); break; case 7: dp = unw_decode_b3_x4 (dp, code, arg); break; } else switch (primary) { case 4: dp = unw_decode_p1 (dp, code, arg); break; case 5: dp = unw_decode_p2_p5 (dp, code, arg); break; case 6: dp = unw_decode_p6 (dp, code, arg); break; case 7: dp = unw_decode_p7_p10 (dp, code, arg); break; } return dp; }
/* libunwind - a platform-independent unwind library Copyright (C) 2001-2002 Hewlett-Packard Co Contributed by David Mosberger-Tang <[email protected]> This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* * Generic IA-64 unwind info decoder. * * This file is used both by the Linux kernel and objdump. Please keep * the two copies of this file in sync. * * You need to customize the decoder by defining the following * macros/constants before including this file: * * Types: * unw_word Unsigned integer type with at least 64 bits * * Register names: * UNW_REG_BSP * UNW_REG_BSPSTORE * UNW_REG_FPSR * UNW_REG_LC * UNW_REG_PFS * UNW_REG_PR * UNW_REG_RNAT * UNW_REG_PSP * UNW_REG_RP * UNW_REG_UNAT * * Decoder action macros: * UNW_DEC_BAD_CODE(code) * UNW_DEC_ABI(fmt,abi,context,arg) * UNW_DEC_BR_GR(fmt,brmask,gr,arg) * UNW_DEC_BR_MEM(fmt,brmask,arg) * UNW_DEC_COPY_STATE(fmt,label,arg) * UNW_DEC_EPILOGUE(fmt,t,ecount,arg) * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg) * UNW_DEC_FR_MEM(fmt,frmask,arg) * UNW_DEC_GR_GR(fmt,grmask,gr,arg) * UNW_DEC_GR_MEM(fmt,grmask,arg) * UNW_DEC_LABEL_STATE(fmt,label,arg) * UNW_DEC_MEM_STACK_F(fmt,t,size,arg) * UNW_DEC_MEM_STACK_V(fmt,t,arg) * UNW_DEC_PRIUNAT_GR(fmt,r,arg) * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg) * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg) * UNW_DEC_PROLOGUE(fmt,body,rlen,arg) * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg) * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg) * UNW_DEC_REG_REG(fmt,src,dst,arg) * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg) * UNW_DEC_REG_WHEN(fmt,reg,t,arg) * UNW_DEC_RESTORE(fmt,t,abreg,arg) * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg) * UNW_DEC_SPILL_BASE(fmt,pspoff,arg) * UNW_DEC_SPILL_MASK(fmt,imaskp,arg) * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg) * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg) * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg) * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg) */ static unw_word unw_decode_uleb128 (unsigned char **dpp) { unsigned shift = 0; unw_word byte, result = 0; unsigned char *bp = *dpp; while (1) { byte = *bp++; result |= (byte & 0x7f) << shift; if ((byte & 0x80) == 0) break; shift += 7; } *dpp = bp; return result; } static unsigned char * unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, abreg; unw_word t, off; byte1 = *dp++; t = unw_decode_uleb128 (&dp); off = unw_decode_uleb128 (&dp); abreg = (byte1 & 0x7f); 
if (byte1 & 0x80) UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg); else UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg); return dp; } static unsigned char * unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, abreg, x, ytreg; unw_word t; byte1 = *dp++; byte2 = *dp++; t = unw_decode_uleb128 (&dp); abreg = (byte1 & 0x7f); ytreg = byte2; x = (byte1 >> 7) & 1; if ((byte1 & 0x80) == 0 && ytreg == 0) UNW_DEC_RESTORE(X2, t, abreg, arg); else UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg); return dp; } static unsigned char * unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, abreg, qp; unw_word t, off; byte1 = *dp++; byte2 = *dp++; t = unw_decode_uleb128 (&dp); off = unw_decode_uleb128 (&dp); qp = (byte1 & 0x3f); abreg = (byte2 & 0x7f); if (byte1 & 0x80) UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); else UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); return dp; } static unsigned char * unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg; unw_word t; byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; t = unw_decode_uleb128 (&dp); qp = (byte1 & 0x3f); abreg = (byte2 & 0x7f); x = (byte2 >> 7) & 1; ytreg = byte3; if ((byte2 & 0x80) == 0 && byte3 == 0) UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); else UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); return dp; } static inline unsigned char * unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg) { int body = (code & 0x20) != 0; unw_word rlen; rlen = (code & 0x1f); UNW_DEC_PROLOGUE(R1, body, rlen, arg); return dp; } static inline unsigned char * unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, mask, grsave; unw_word rlen; byte1 = *dp++; mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); grsave = (byte1 & 0x7f); rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg); return dp; } static inline unsigned char * unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg) { unw_word rlen; rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg); return dp; } static inline unsigned char * unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg) { unsigned char brmask = (code & 0x1f); UNW_DEC_BR_MEM(P1, brmask, arg); return dp; } static inline unsigned char * unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg) { if ((code & 0x10) == 0) { unsigned char byte1 = *dp++; UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1), (byte1 & 0x7f), arg); } else if ((code & 0x08) == 0) { unsigned char byte1 = *dp++, r, dst; r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); dst = (byte1 & 0x7f); switch (r) { case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break; case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break; case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break; case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break; case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break; case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break; case 6: UNW_DEC_RP_BR(P3, dst, arg); break; case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break; case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break; case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break; case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break; case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else if ((code & 0x7) == 0) UNW_DEC_SPILL_MASK(P4, dp, arg); else if ((code & 
0x7) == 1) { unw_word grmask, frmask, byte1, byte2, byte3; byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; grmask = ((byte1 >> 4) & 0xf); frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3; UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg); } else UNW_DEC_BAD_CODE(code); return dp; } static inline unsigned char * unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg) { int gregs = (code & 0x10) != 0; unsigned char mask = (code & 0x0f); if (gregs) UNW_DEC_GR_MEM(P6, mask, arg); else UNW_DEC_FR_MEM(P6, mask, arg); return dp; } static inline unsigned char * unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg) { unsigned char r, byte1, byte2; unw_word t, size; if ((code & 0x10) == 0) { r = (code & 0xf); t = unw_decode_uleb128 (&dp); switch (r) { case 0: size = unw_decode_uleb128 (&dp); UNW_DEC_MEM_STACK_F(P7, t, size, arg); break; case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break; case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break; case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break; case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break; case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break; case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break; case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break; case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break; case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break; case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break; case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break; case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break; case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break; case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else { switch (code & 0xf) { case 0x0: /* p8 */ { r = *dp++; t = unw_decode_uleb128 (&dp); switch (r) { case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break; case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break; case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break; case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break; case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break; case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break; case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break; case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break; case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break; case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break; case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break; case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break; case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break; case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break; case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break; case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } break; case 0x1: byte1 = *dp++; byte2 = *dp++; UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg); break; case 0xf: /* p10 */ byte1 = *dp++; byte2 = *dp++; UNW_DEC_ABI(P10, byte1, byte2, arg); break; case 0x9: return unw_decode_x1 (dp, code, arg); case 0xa: return unw_decode_x2 (dp, code, arg); case 0xb: return unw_decode_x3 (dp, code, arg); case 0xc: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } } return dp; } static inline unsigned char * unw_decode_b1 (unsigned char *dp, unsigned 
char code, void *arg) { unw_word label = (code & 0x1f); if ((code & 0x20) != 0) UNW_DEC_COPY_STATE(B1, label, arg); else UNW_DEC_LABEL_STATE(B1, label, arg); return dp; } static inline unsigned char * unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg) { unw_word t; t = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg); return dp; } static inline unsigned char * unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg) { unw_word t, ecount, label; if ((code & 0x10) == 0) { t = unw_decode_uleb128 (&dp); ecount = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B3, t, ecount, arg); } else if ((code & 0x07) == 0) { label = unw_decode_uleb128 (&dp); if ((code & 0x08) != 0) UNW_DEC_COPY_STATE(B4, label, arg); else UNW_DEC_LABEL_STATE(B4, label, arg); } else switch (code & 0x7) { case 1: return unw_decode_x1 (dp, code, arg); case 2: return unw_decode_x2 (dp, code, arg); case 3: return unw_decode_x3 (dp, code, arg); case 4: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } return dp; } typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *); /* * Decode one descriptor and return address of next descriptor. */ static inline unsigned char * unw_decode (unsigned char *dp, int inside_body, void *arg) { unsigned char code, primary; code = *dp++; primary = code >> 5; if (primary < 2) dp = unw_decode_r1 (dp, code, arg); else if (primary == 2) dp = unw_decode_r2 (dp, code, arg); else if (primary == 3) dp = unw_decode_r3 (dp, code, arg); else if (inside_body) switch (primary) { case 4: case 5: dp = unw_decode_b1 (dp, code, arg); break; case 6: dp = unw_decode_b2 (dp, code, arg); break; case 7: dp = unw_decode_b3_x4 (dp, code, arg); break; } else switch (primary) { case 4: dp = unw_decode_p1 (dp, code, arg); break; case 5: dp = unw_decode_p2_p5 (dp, code, arg); break; case 6: dp = unw_decode_p6 (dp, code, arg); break; case 7: dp = unw_decode_p7_p10 (dp, code, arg); break; } return dp; }
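Both copies of the decoder above hinge on unw_decode_uleb128, so a standalone round-trip may help. The following sketch is illustrative only (it uses uint64_t in place of unw_word and is not libunwind code); it encodes a value with the same 7-bits-per-byte, high-bit-continuation scheme and decodes it back.

#include <stdint.h>
#include <stdio.h>

/* Encode `value` as ULEB128 into `out`; returns the number of bytes. */
static size_t
uleb128_encode (uint64_t value, unsigned char *out)
{
  size_t n = 0;
  do
    {
      unsigned char byte = value & 0x7f;
      value >>= 7;
      if (value != 0)
        byte |= 0x80;  /* high bit set: more bytes follow */
      out[n++] = byte;
    }
  while (value != 0);
  return n;
}

/* Mirror of unw_decode_uleb128 above, with plain types. */
static uint64_t
uleb128_decode (const unsigned char *in)
{
  uint64_t byte, result = 0;
  unsigned shift = 0;
  while (1)
    {
      byte = *in++;
      result |= (byte & 0x7f) << shift;
      if ((byte & 0x80) == 0)
        break;
      shift += 7;
    }
  return result;
}

int
main (void)
{
  unsigned char buf[10];
  size_t n = uleb128_encode (624485, buf);  /* encodes as e5 8e 26 */
  printf ("%zu bytes, round-trip = %llu\n", n,
          (unsigned long long) uleb128_decode (buf));
  return 0;
}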
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/file_io/WriteFile/test5/writefile.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: WriteFile.c (test 5) ** ** Purpose: Tests the PAL implementation of the WriteFile function. ** Writes a huge file. ** ** dependency: ** CreateFile. ** GetFileSize. ** FlushFileBuffers ** CloseHandle ** DeleteFile ** ** **===================================================================*/ #include <palsuite.h> BOOL CleanUp_WriteFile_test5(HANDLE hFile, const char * fileName) { BOOL bRc = TRUE; if (CloseHandle(hFile) != TRUE) { bRc = FALSE; Trace("WriteFile: ERROR -> Unable to close file \"%s\"," " error: %ld.\n", fileName, GetLastError()); } if (!DeleteFileA(fileName)) { bRc = FALSE; Trace("WriteFile: ERROR -> Unable to delete file \"%s\"," " error: %ld.\n", fileName, GetLastError()); } return bRc; } PALTEST(file_io_WriteFile_test5_paltest_writefile_test5, "file_io/WriteFile/test5/paltest_writefile_test5") { HANDLE hFile = NULL; DWORD dwBytesWritten; const char* hugeStringTest = "1234567890123456789012345678901234567890"; const char* szWritableFile = "writeable.txt"; int i =0; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create the test file */ hFile = CreateFile(szWritableFile, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { Fail("WriteFile: ERROR -> Unable to create file \"%s\".\n", szWritableFile); } /* write 4,000,000 chars to the file. */ for (i=0; i<100000;i++) { if( WriteFile(hFile, /* HANDLE handle to file */ hugeStringTest, /* data buffer */ strlen(hugeStringTest), /* number of bytes to write */ &dwBytesWritten, /* number of bytes written */ NULL) /* overlapped buffer */ ==0) { Trace("WriteFile: ERROR -> Unable to write to file, error: %ld \n", GetLastError()); CleanUp_WriteFile_test5(hFile,szWritableFile); Fail(""); } } if(!FlushFileBuffers(hFile)) { Trace("WriteFile: ERROR -> Call to FlushFileBuffers failed," " error: %ld \n",GetLastError()); CleanUp_WriteFile_test5(hFile,szWritableFile); Fail(""); } /* test if the size changed properly. */ if(GetFileSize(hFile,NULL) != 4000000) { Trace("WriteFile: ERROR -> file size did not change properly" " after writing 4,000,000 chars to it ( size= %u )\n", GetFileSize(hFile,NULL)); CleanUp_WriteFile_test5(hFile,szWritableFile); Fail(""); } if (!CleanUp_WriteFile_test5(hFile,szWritableFile)) { Fail(""); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: WriteFile.c (test 5) ** ** Purpose: Tests the PAL implementation of the WriteFile function. ** Writes a huge file. ** ** dependency: ** CreateFile. ** GetFileSize. ** FlushFileBuffers ** CloseHandle ** DeleteFile ** ** **===================================================================*/ #include <palsuite.h> BOOL CleanUp_WriteFile_test5(HANDLE hFile, const char * fileName) { BOOL bRc = TRUE; if (CloseHandle(hFile) != TRUE) { bRc = FALSE; Trace("WriteFile: ERROR -> Unable to close file \"%s\"," " error: %ld.\n", fileName, GetLastError()); } if (!DeleteFileA(fileName)) { bRc = FALSE; Trace("WriteFile: ERROR -> Unable to delete file \"%s\"," " error: %ld.\n", fileName, GetLastError()); } return bRc; } PALTEST(file_io_WriteFile_test5_paltest_writefile_test5, "file_io/WriteFile/test5/paltest_writefile_test5") { HANDLE hFile = NULL; DWORD dwBytesWritten; const char* hugeStringTest = "1234567890123456789012345678901234567890"; const char* szWritableFile = "writeable.txt"; int i =0; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create the test file */ hFile = CreateFile(szWritableFile, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { Fail("WriteFile: ERROR -> Unable to create file \"%s\".\n", szWritableFile); } /* write 4,000,000 chars to the file. */ for (i=0; i<100000;i++) { if( WriteFile(hFile, /* HANDLE handle to file */ hugeStringTest, /* data buffer */ strlen(hugeStringTest), /* number of bytes to write */ &dwBytesWritten, /* number of bytes written */ NULL) /* overlapped buffer */ ==0) { Trace("WriteFile: ERROR -> Unable to write to file, error: %ld \n", GetLastError()); CleanUp_WriteFile_test5(hFile,szWritableFile); Fail(""); } } if(!FlushFileBuffers(hFile)) { Trace("WriteFile: ERROR -> Call to FlushFileBuffers failed," " error: %ld \n",GetLastError()); CleanUp_WriteFile_test5(hFile,szWritableFile); Fail(""); } /* test if the size changed properly. */ if(GetFileSize(hFile,NULL) != 4000000) { Trace("WriteFile: ERROR -> file size did not change properly" " after writing 4,000,000 chars to it ( size= %u )\n", GetFileSize(hFile,NULL)); CleanUp_WriteFile_test5(hFile,szWritableFile); Fail(""); } if (!CleanUp_WriteFile_test5(hFile,szWritableFile)) { Fail(""); } PAL_Terminate(); return PASS; }
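The test above relies on the PAL's CreateFile/WriteFile/FlushFileBuffers/GetFileSize, which need the PAL environment to run. The same write-then-verify pattern can be sketched portably with standard C stdio; this analogue is illustrative only and is not part of the test suite.

#include <stdio.h>
#include <string.h>

/* Write 100000 copies of a 40-byte chunk, flush, and verify that the
 * file ends up exactly 4,000,000 bytes long. */
int
main (void)
{
    const char *chunk = "1234567890123456789012345678901234567890";
    size_t chunk_len = strlen (chunk);
    FILE *f = fopen ("writeable.txt", "wb");
    if (!f) { perror ("fopen"); return 1; }

    for (int i = 0; i < 100000; i++)
    {
        if (fwrite (chunk, 1, chunk_len, f) != chunk_len)
        { perror ("fwrite"); fclose (f); return 1; }
    }

    fflush (f);             /* analogue of FlushFileBuffers */
    long size = ftell (f);  /* analogue of GetFileSize */
    fclose (f);
    remove ("writeable.txt");

    return size == 4000000L ? 0 : 1;
}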
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/md/enc/stdafx.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // stdafx.h // // // Precompiled headers. // //***************************************************************************** #ifndef __STDAFX_H_ #define __STDAFX_H_ #include <crtwrap.h> #include <winwrap.h> #include <utilcode.h> #include <cor.h> #include <corpriv.h> #include <metamodelro.h> #include <liteweightstgdb.h> #include "mdcommon.h" #include "utsem.h" #endif // __STDAFX_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // stdafx.h // // // Precompiled headers. // //***************************************************************************** #ifndef __STDAFX_H_ #define __STDAFX_H_ #include <crtwrap.h> #include <winwrap.h> #include <utilcode.h> #include <cor.h> #include <corpriv.h> #include <metamodelro.h> #include <liteweightstgdb.h> #include "mdcommon.h" #include "utsem.h" #endif // __STDAFX_H_
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/vm/arm64/excepcpu.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #ifndef __excepcpu_h__ #define __excepcpu_h__ #define THROW_CONTROL_FOR_THREAD_FUNCTION RedirectForThreadAbort EXTERN_C void RedirectForThreadAbort(); #define STATUS_CLR_GCCOVER_CODE STATUS_ILLEGAL_INSTRUCTION class Thread; class FaultingExceptionFrame; #define INSTALL_EXCEPTION_HANDLING_RECORD(record) #define UNINSTALL_EXCEPTION_HANDLING_RECORD(record) // // On ARM64, the COMPlusFrameHandler's work is done by our personality routine. // #define DECLARE_CPFH_EH_RECORD(pCurThread) // // Retrieves the redirected CONTEXT* from the stack frame of one of the // RedirectedHandledJITCaseForXXX_Stubs. // PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext); PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext); // // Retrieves the FaultingExceptionFrame* from the stack frame of // RedirectForThrowControl or NakedThrowHelper. // FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (T_DISPATCHER_CONTEXT *pDispatcherContext); inline PCODE GetAdjustedCallAddress(PCODE returnAddress) { LIMITED_METHOD_CONTRACT; return returnAddress - 4; } BOOL AdjustContextForVirtualStub(EXCEPTION_RECORD *pExceptionRecord, T_CONTEXT *pContext); #endif // __excepcpu_h__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #ifndef __excepcpu_h__ #define __excepcpu_h__ #define THROW_CONTROL_FOR_THREAD_FUNCTION RedirectForThreadAbort EXTERN_C void RedirectForThreadAbort(); #define STATUS_CLR_GCCOVER_CODE STATUS_ILLEGAL_INSTRUCTION class Thread; class FaultingExceptionFrame; #define INSTALL_EXCEPTION_HANDLING_RECORD(record) #define UNINSTALL_EXCEPTION_HANDLING_RECORD(record) // // On ARM64, the COMPlusFrameHandler's work is done by our personality routine. // #define DECLARE_CPFH_EH_RECORD(pCurThread) // // Retrieves the redirected CONTEXT* from the stack frame of one of the // RedirectedHandledJITCaseForXXX_Stubs. // PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext); PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_CONTEXT * pContext); // // Retrieves the FaultingExceptionFrame* from the stack frame of // RedirectForThrowControl or NakedThrowHelper. // FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (T_DISPATCHER_CONTEXT *pDispatcherContext); inline PCODE GetAdjustedCallAddress(PCODE returnAddress) { LIMITED_METHOD_CONTRACT; return returnAddress - 4; } BOOL AdjustContextForVirtualStub(EXCEPTION_RECORD *pExceptionRecord, T_CONTEXT *pContext); #endif // __excepcpu_h__
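The -4 in GetAdjustedCallAddress above reflects AArch64's fixed 4-byte instruction encoding: the saved return address points just past the BL that made the call, so backing up one instruction recovers the call site itself. A minimal sketch with plain integer types (illustrative only, not runtime code):

#include <stdint.h>

/* On AArch64 every instruction is 4 bytes wide, so the address of the
 * calling BL is the saved return address minus one instruction slot. */
static inline uint64_t
adjusted_call_address (uint64_t return_address)
{
    return return_address - 4;
}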
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/file_io/GetFullPathNameW/test4/test4.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test4.c ** ** Purpose: Tests the PAL implementation of the GetFullPathNameW API. ** GetFullPathNameW will be passed a directory that begins with '..'. ** Example: ..\test_directory\testing.tmp. ** To add to this test, we will also call SetCurrentDirectory to ** ensure this is handled properly. ** The test will create a file within the parent directory ** to verify that the returned directory is valid. ** ** Depends: SetCurrentDirectory, ** CreateDirectory, ** strcat, ** memset, ** CreateFile, ** CloseHandle, ** strcmp, ** DeleteFileW, ** RemoveDirectory. ** ** **===================================================================*/ #define UNICODE #include <palsuite.h> PALTEST(file_io_GetFullPathNameW_test4_paltest_getfullpathnamew_test4, "file_io/GetFullPathNameW/test4/paltest_getfullpathnamew_test4") { #ifdef WIN32 const WCHAR szSeperator[] = {'\\','\\','\0'}; #else const WCHAR szSeperator[] = {'/','/','\0'}; #endif const WCHAR szDotDot[] = {'.','.','\0'}; const WCHAR szFileName[] = {'t','e','s','t','i','n','g','.','t','m','p','\0'}; DWORD dwRc = 0; WCHAR szReturnedPath[_MAX_DIR+1]; WCHAR szFullFileName[_MAX_DIR+1]; WCHAR szDirectory[256]; WCHAR szCreatedDir[] = {'t','e','s','t','_','d','i','r','\0'}; LPWSTR pPathPtr; HANDLE hFile = NULL; BOOL bRetVal = FAIL; /* Initialize the PAL. */ if (0 != PAL_Initialize(argc,argv)) { return (FAIL); } /* Initialize the buffer. */ memset(szDirectory, '\0', 256 * sizeof(szDirectory[0])); /* Create the path to the next level of directory to create. */ wcscat(szDirectory, szDotDot); /* .. */ wcscat(szDirectory, szSeperator); /* ../ */ wcscat(szDirectory, szCreatedDir); /* ../test_directory */ /* Create a test directory. */ if (!CreateDirectoryW(szDirectory, NULL)) { Fail("ERROR:%u: Unable to create directories \"%S\".\n", GetLastError(), szDirectory); } /* Initialize the receiving char buffers. */ memset(szReturnedPath, 0, sizeof(szFullFileName)); memset(szFullFileName, 0, sizeof(szFullFileName)); /* Create Full filename to pass, will include '..\' * at the start of the path. */ wcscat( szFullFileName, szDotDot ); /* .. */ wcscat( szFullFileName, szSeperator ); /* ../ */ wcscat( szFullFileName, szCreatedDir ); /* ../test_directory */ wcscat( szFullFileName, szSeperator ); /* ../test_directory/ */ wcscat( szFullFileName, szFileName ); /* ../test_directory/testing.tmp */ /* Get the full path to the filename. */ dwRc = GetFullPathNameW(szFullFileName, _MAX_DIR, szReturnedPath, &pPathPtr); if (dwRc == 0) { Trace("ERROR :%ld: GetFullPathName failed to " "retrieve the path of \"%S\".\n", GetLastError(), szFileName); bRetVal = FAIL; goto cleanUpOne; } /* The returned value should be the parent directory with the * file name appended. */ hFile = CreateFileW(szReturnedPath, GENERIC_READ, FILE_SHARE_READ, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if (hFile == INVALID_HANDLE_VALUE) { Trace("ERROR :%ld: CreateFileW failed to create \"%S\".\n", GetLastError(), szReturnedPath); bRetVal = FAIL; goto cleanUpOne; } /* Close the handle to the created file. */ if (CloseHandle(hFile) != TRUE) { Trace("ERROR :%ld: CloseHandle failed to close the file.\n", GetLastError()); bRetVal = FAIL; goto cleanUpTwo; } /* Verify that the file was created, attempt to create * the file again.
*/ hFile = CreateFileW(szReturnedPath, GENERIC_READ, FILE_SHARE_READ, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL); if ((hFile != INVALID_HANDLE_VALUE) && (GetLastError() != ERROR_ALREADY_EXISTS)) { Trace("ERROR :%ld: CreateFileW succeeded in creating file " "\"%S\", which already existed.\n", GetLastError(), szFullFileName); bRetVal = FAIL; goto cleanUpTwo; } /* Verify that the returned filename is the same as the one supplied. */ if (wcscmp(pPathPtr, szFileName) != 0) { Trace("ERROR : Returned filename \"%S\" is not equal to " "supplied filename \"%S\".\n", pPathPtr, szFileName); bRetVal = FAIL; goto cleanUpTwo; } /* Successful test. */ bRetVal = PASS; cleanUpTwo: /* Delete the created file. */ if (DeleteFileW(szReturnedPath) != TRUE) { Fail("ERROR :%ld: DeleteFileW failed to delete \"%S\".\n", GetLastError(), szFileName); } cleanUpOne: /* Remove the empty directory. */ if (!RemoveDirectoryW(szDirectory)) { Fail("ERROR:%u: Unable to remove directory \"%S\".\n", GetLastError(), szCreatedDir); } /* Terminate the PAL.*/ PAL_TerminateEx(bRetVal); return bRetVal; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test4.c ** ** Purpose: Tests the PAL implementation of the GetFullPathNameW API. ** GetFullPathNameW will be passed a directory that begins with '..'. ** Example: ..\test_directory\testing.tmp. ** To add to this test, we will also call SetCurrentDirectory to ** ensure this is handled properly. ** The test will create a file within the parent directory ** to verify that the returned directory is valid. ** ** Depends: SetCurrentDirectory, ** CreateDirectory, ** strcat, ** memset, ** CreateFile, ** CloseHandle, ** strcmp, ** DeleteFileW, ** RemoveDirectory. ** ** **===================================================================*/ #define UNICODE #include <palsuite.h> PALTEST(file_io_GetFullPathNameW_test4_paltest_getfullpathnamew_test4, "file_io/GetFullPathNameW/test4/paltest_getfullpathnamew_test4") { #ifdef WIN32 const WCHAR szSeperator[] = {'\\','\\','\0'}; #else const WCHAR szSeperator[] = {'/','/','\0'}; #endif const WCHAR szDotDot[] = {'.','.','\0'}; const WCHAR szFileName[] = {'t','e','s','t','i','n','g','.','t','m','p','\0'}; DWORD dwRc = 0; WCHAR szReturnedPath[_MAX_DIR+1]; WCHAR szFullFileName[_MAX_DIR+1]; WCHAR szDirectory[256]; WCHAR szCreatedDir[] = {'t','e','s','t','_','d','i','r','\0'}; LPWSTR pPathPtr; HANDLE hFile = NULL; BOOL bRetVal = FAIL; /* Initialize the PAL. */ if (0 != PAL_Initialize(argc,argv)) { return (FAIL); } /* Initialize the buffer. */ memset(szDirectory, '\0', 256 * sizeof(szDirectory[0])); /* Create the path to the next level of directory to create. */ wcscat(szDirectory, szDotDot); /* .. */ wcscat(szDirectory, szSeperator); /* ../ */ wcscat(szDirectory, szCreatedDir); /* ../test_directory */ /* Create a test directory. */ if (!CreateDirectoryW(szDirectory, NULL)) { Fail("ERROR:%u: Unable to create directories \"%S\".\n", GetLastError(), szDirectory); } /* Initialize the receiving char buffers. */ memset(szReturnedPath, 0, sizeof(szFullFileName)); memset(szFullFileName, 0, sizeof(szFullFileName)); /* Create Full filename to pass, will include '..\' * at the start of the path. */ wcscat( szFullFileName, szDotDot ); /* .. */ wcscat( szFullFileName, szSeperator ); /* ../ */ wcscat( szFullFileName, szCreatedDir ); /* ../test_directory */ wcscat( szFullFileName, szSeperator ); /* ../test_directory/ */ wcscat( szFullFileName, szFileName ); /* ../test_directory/testing.tmp */ /* Get the full path to the filename. */ dwRc = GetFullPathNameW(szFullFileName, _MAX_DIR, szReturnedPath, &pPathPtr); if (dwRc == 0) { Trace("ERROR :%ld: GetFullPathName failed to " "retrieve the path of \"%S\".\n", GetLastError(), szFileName); bRetVal = FAIL; goto cleanUpOne; } /* The returned value should be the parent directory with the * file name appended. */ hFile = CreateFileW(szReturnedPath, GENERIC_READ, FILE_SHARE_READ, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if (hFile == INVALID_HANDLE_VALUE) { Trace("ERROR :%ld: CreateFileW failed to create \"%S\".\n", GetLastError(), szReturnedPath); bRetVal = FAIL; goto cleanUpOne; } /* Close the handle to the created file. */ if (CloseHandle(hFile) != TRUE) { Trace("ERROR :%ld: CloseHandle failed to close the file.\n", GetLastError()); bRetVal = FAIL; goto cleanUpTwo; } /* Verify that the file was created, attempt to create * the file again.
*/ hFile = CreateFileW(szReturnedPath, GENERIC_READ, FILE_SHARE_READ, NULL, CREATE_NEW, FILE_ATTRIBUTE_NORMAL, NULL); if ((hFile != INVALID_HANDLE_VALUE) && (GetLastError() != ERROR_ALREADY_EXISTS)) { Trace("ERROR :%ld: CreateFileW succeeded in creating file " "\"%S\", which already existed.\n", GetLastError(), szFullFileName); bRetVal = FAIL; goto cleanUpTwo; } /* Verify that the returned filename is the same as the one supplied. */ if (wcscmp(pPathPtr, szFileName) != 0) { Trace("ERROR : Returned filename \"%S\" is not equal to " "supplied filename \"%S\".\n", pPathPtr, szFileName); bRetVal = FAIL; goto cleanUpTwo; } /* Successful test. */ bRetVal = PASS; cleanUpTwo: /* Delete the created file. */ if (DeleteFileW(szReturnedPath) != TRUE) { Fail("ERROR :%ld: DeleteFileW failed to delete \"%S\".\n", GetLastError(), szFileName); } cleanUpOne: /* Remove the empty directory. */ if (!RemoveDirectoryW(szDirectory)) { Fail("ERROR:%u: Unable to remove directory \"%S\".\n", GetLastError(), szCreatedDir); } /* Terminate the PAL.*/ PAL_TerminateEx(bRetVal); return bRetVal; }
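For comparison, the same '..'-resolution check can be sketched on POSIX with realpath(3). This analogue is illustrative only and not a PAL test; it shells out with system() purely to create the fixture, and the test_dir/testing.tmp names simply mirror the test above.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main (void)
{
    /* realpath requires the file to exist, so create it first */
    if (system ("mkdir -p ../test_dir && touch ../test_dir/testing.tmp") != 0)
        return 1;

    char resolved[PATH_MAX];
    if (!realpath ("../test_dir/testing.tmp", resolved))
    { perror ("realpath"); return 1; }

    /* the last component of the resolved path should be the file name */
    const char *base = strrchr (resolved, '/');
    printf ("full path: %s\n", resolved);
    return (base && strcmp (base + 1, "testing.tmp") == 0) ? 0 : 1;
}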
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/native/external/brotli/enc/compress_fragment_two_pass.h
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Function for fast encoding of an input fragment, independently from the input history. This function uses two-pass processing: in the first pass we save the found backward matches and literal bytes into a buffer, and in the second pass we emit them into the bit stream using prefix codes built based on the actual command and literal byte histograms. */ #ifndef BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #define BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #include "../common/platform.h" #include <brotli/types.h> #include "./memory.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static const size_t kCompressFragmentTwoPassBlockSize = 1 << 17; /* Compresses "input" string to the "*storage" buffer as one or more complete meta-blocks, and updates the "*storage_ix" bit position. If "is_last" is 1, emits an additional empty last meta-block. REQUIRES: "input_size" is greater than zero, or "is_last" is 1. REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). REQUIRES: "command_buf" and "literal_buf" point to at least kCompressFragmentTwoPassBlockSize long arrays. REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. REQUIRES: "table_size" is a power of two OUTPUT: maximal copy distance <= |input_size| OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ BROTLI_INTERNAL void BrotliCompressFragmentTwoPass(MemoryManager* m, const uint8_t* input, size_t input_size, BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, int* table, size_t table_size, size_t* storage_ix, uint8_t* storage); #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif #endif /* BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ */
/* Copyright 2015 Google Inc. All Rights Reserved. Distributed under MIT license. See file LICENSE for detail or copy at https://opensource.org/licenses/MIT */ /* Function for fast encoding of an input fragment, independently from the input history. This function uses two-pass processing: in the first pass we save the found backward matches and literal bytes into a buffer, and in the second pass we emit them into the bit stream using prefix codes built based on the actual command and literal byte histograms. */ #ifndef BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #define BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ #include "../common/platform.h" #include <brotli/types.h> #include "./memory.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static const size_t kCompressFragmentTwoPassBlockSize = 1 << 17; /* Compresses "input" string to the "*storage" buffer as one or more complete meta-blocks, and updates the "*storage_ix" bit position. If "is_last" is 1, emits an additional empty last meta-block. REQUIRES: "input_size" is greater than zero, or "is_last" is 1. REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). REQUIRES: "command_buf" and "literal_buf" point to at least kCompressFragmentTwoPassBlockSize long arrays. REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. REQUIRES: "table_size" is a power of two OUTPUT: maximal copy distance <= |input_size| OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ BROTLI_INTERNAL void BrotliCompressFragmentTwoPass(MemoryManager* m, const uint8_t* input, size_t input_size, BROTLI_BOOL is_last, uint32_t* command_buf, uint8_t* literal_buf, int* table, size_t table_size, size_t* storage_ix, uint8_t* storage); #if defined(__cplusplus) || defined(c_plusplus) } /* extern "C" */ #endif #endif /* BROTLI_ENC_COMPRESS_FRAGMENT_TWO_PASS_H_ */
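To make the two-pass description above concrete, here is a conceptual sketch in plain C. It does not use Brotli's API or bitstream format; it only illustrates the idea that pass 1 buffers the symbols and builds a histogram, and pass 2 emits from the buffer, which is where a real encoder would apply prefix codes derived from that histogram.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
    const char *input = "abracadabra";
    size_t n = strlen (input);

    /* pass 1: buffer the literals and count a histogram */
    uint8_t literal_buf[64];
    uint32_t histogram[256] = { 0 };
    for (size_t i = 0; i < n; i++)
    {
        literal_buf[i] = (uint8_t) input[i];
        histogram[literal_buf[i]]++;
    }

    /* pass 2: emit from the buffer; a real encoder would first build
     * prefix codes from the histogram so frequent symbols get short
     * codes, which is the point of buffering before emitting */
    for (size_t i = 0; i < n; i++)
        putchar (literal_buf[i]);
    putchar ('\n');
    for (int s = 0; s < 256; s++)
        if (histogram[s])
            printf ("'%c' x %u\n", s, (unsigned) histogram[s]);
    return 0;
}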
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/mono/mono/sgen/sgen-minor-copy-object.h
/** * \file * Copy functions for nursery collections. * * Copyright 2001-2003 Ximian, Inc * Copyright 2003-2010 Novell, Inc. * Copyright (C) 2012 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #undef SERIAL_COPY_OBJECT #undef SERIAL_COPY_OBJECT_FROM_OBJ #if defined(SGEN_SIMPLE_NURSERY) #ifdef SGEN_SIMPLE_PAR_NURSERY #ifdef SGEN_CONCURRENT_MAJOR #define SERIAL_COPY_OBJECT simple_par_nursery_with_concurrent_major_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_par_nursery_with_concurrent_major_copy_object_from_obj #else #define SERIAL_COPY_OBJECT simple_par_nursery_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_par_nursery_copy_object_from_obj #endif #else #ifdef SGEN_CONCURRENT_MAJOR #define SERIAL_COPY_OBJECT simple_nursery_serial_with_concurrent_major_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_nursery_serial_with_concurrent_major_copy_object_from_obj #else #define SERIAL_COPY_OBJECT simple_nursery_serial_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_nursery_serial_copy_object_from_obj #endif #endif #elif defined (SGEN_SPLIT_NURSERY) #ifdef SGEN_CONCURRENT_MAJOR #define SERIAL_COPY_OBJECT split_nursery_serial_with_concurrent_major_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ split_nursery_serial_with_concurrent_major_copy_object_from_obj #else #define SERIAL_COPY_OBJECT split_nursery_serial_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ split_nursery_serial_copy_object_from_obj #endif #else #error "No nursery configuration specified" #endif extern guint64 stat_nursery_copy_object_failed_to_space; /* from sgen-gc.c */ /* * This is how the copying happens from the nursery to the old generation. * We assume that at this time all the pinned objects have been identified and * marked as such. * We run scan_object() for each pinned object so that each referenced * object is copied if possible. The new gray objects created can have * scan_object() run on them right away, too. * Then we run copy_object() for the precisely tracked roots. At this point * all the roots are either gray or black. We run scan_object() on the gray * objects until no more gray objects are created. * At the end of the process we walk the pinned list again and unmark * the pinned flag. As we go we also create the list of free space for use * in the next allocation runs. * * We need to remember objects from the old generation that point to the new one * (or just addresses?). * * copy_object could be made into a macro once debugged (use inline for now). */ static MONO_ALWAYS_INLINE void SERIAL_COPY_OBJECT (GCObject **obj_slot, SgenGrayQueue *queue) { GCObject *forwarded; GCObject *copy; GCObject *obj = *obj_slot; SGEN_ASSERT (9, sgen_current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy from a %d generation collection", sgen_current_collection_generation); HEAVY_STAT (++stat_copy_object_called_nursery); if (!sgen_ptr_in_nursery (obj)) { HEAVY_STAT (++stat_nursery_copy_object_failed_from_space); return; } SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot); /* * Before we can copy the object we must make sure that we are * allowed to, i.e. that the object is not pinned, not already * forwarded, and does not belong to the nursery To Space.
*/ if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) { SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded), "forwarded object %p has no gc descriptor", forwarded); SGEN_LOG (9, " (already forwarded to %p)", forwarded); HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded); SGEN_UPDATE_REFERENCE (obj_slot, forwarded); return; } if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) { SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj); SGEN_LOG (9, " (pinned, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_pinned); return; } #ifndef SGEN_SIMPLE_NURSERY if (sgen_nursery_is_to_space (obj)) { SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj); SGEN_LOG (9, " (tospace, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_to_space); return; } #endif HEAVY_STAT (++stat_objects_copied_nursery); #ifdef SGEN_SIMPLE_PAR_NURSERY copy = copy_object_no_checks_par (obj, queue); #else copy = copy_object_no_checks (obj, queue); #endif SGEN_UPDATE_REFERENCE (obj_slot, copy); } /* * SERIAL_COPY_OBJECT_FROM_OBJ: * * Similar to SERIAL_COPY_OBJECT, but assumes that OBJ_SLOT is part of an object, so it handles global remsets as well. */ static MONO_ALWAYS_INLINE void SERIAL_COPY_OBJECT_FROM_OBJ (GCObject **obj_slot, SgenGrayQueue *queue) { GCObject *forwarded; GCObject *obj = *obj_slot; GCObject *copy; SGEN_ASSERT (9, sgen_current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy-from-obj from a %d generation collection", sgen_current_collection_generation); HEAVY_STAT (++stat_copy_object_called_nursery); if (!sgen_ptr_in_nursery (obj)) { HEAVY_STAT (++stat_nursery_copy_object_failed_from_space); return; } SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot); /* * Before we can copy the object we must make sure that we are * allowed to, i.e. that the object is not pinned, not already * forwarded, and does not belong to the nursery To Space. */ if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) { SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded), "forwarded object %p has no gc descriptor", forwarded); SGEN_LOG (9, " (already forwarded to %p)", forwarded); HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded); #ifdef SGEN_CONCURRENT_MAJOR /* See comment on STORE_STORE_FENCE below. */ STORE_STORE_FENCE; #endif SGEN_UPDATE_REFERENCE (obj_slot, forwarded); #ifndef SGEN_SIMPLE_NURSERY if (G_UNLIKELY (sgen_ptr_in_nursery (forwarded) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (forwarded))) sgen_add_to_global_remset (obj_slot, forwarded); #endif return; } if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) { SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj); SGEN_LOG (9, " (pinned, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_pinned); if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj)) sgen_add_to_global_remset (obj_slot, obj); return; } #ifndef SGEN_SIMPLE_NURSERY if (sgen_nursery_is_to_space (obj)) { /* FIXME: all of these could just use `sgen_obj_get_descriptor_safe()` */ SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj); SGEN_LOG (9, " (tospace, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_to_space); /* * FIXME: * * The card table scanning code sometimes clears cards * that have just been set for a global remset.
In * the split nursery the following situation can * occur: * * Let's say object A starts in card C but continues * into C+1. Within A, at offset O there's a * reference to a new nursery object X. A+O is in * card C+1. Now card C is scanned, and as part of * it, object A. The reference at A+O is processed by * copying X into nursery to-space at Y. Since it's * still in the nursery, a global remset must be added * for A+O, so card C+1 is marked. Now, however, card * C+1 is scanned, which means that it's cleared * first. This wouldn't be terribly bad if reference * A+O were re-scanned and the global remset re-added, * but since the reference points to to-space, that * doesn't happen, and C+1 remains cleared: the remset * is lost. * * There's at least two ways to fix this. The easy * one is to re-add the remset on the re-scan. This * is that - the following two lines of code. * * The proper solution appears to be to first make a * copy of the cards before scanning a block, then to * clear all the cards and scan from the copy, so no * remsets will be overwritten. Scanning objects at * most once would be the icing on the cake. */ if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj)) sgen_add_to_global_remset (obj_slot, obj); return; } #endif HEAVY_STAT (++stat_objects_copied_nursery); #ifdef SGEN_SIMPLE_PAR_NURSERY copy = copy_object_no_checks_par (obj, queue); #else copy = copy_object_no_checks (obj, queue); #endif #ifdef SGEN_CONCURRENT_MAJOR /* * If an object is evacuated to the major heap and a reference to it, from the major * heap, updated, the concurrent major collector might follow that reference and * scan the new major object. To make sure the object contents are seen by the * major collector we need this write barrier, so that the reference is seen after * the object. */ STORE_STORE_FENCE; #endif SGEN_UPDATE_REFERENCE (obj_slot, copy); #ifndef SGEN_SIMPLE_NURSERY if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy))) sgen_add_to_global_remset (obj_slot, copy); #else /* copy_object_no_checks () can return obj on OOM */ if (G_UNLIKELY (obj == copy)) { if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy))) sgen_add_to_global_remset (obj_slot, copy); } #endif }
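A note on the pattern above: once an object has been evacuated, the forwarding pointer left behind in the from-space copy makes every later visit to the same object resolve to the single to-space copy, which is what preserves object identity across the collection. Below is a minimal, self-contained sketch of that idea in C. It is illustrative only: the Obj layout, the fwd field, and the bump-pointer to_space allocator are invented for this sketch (SGen itself encodes the forwarding state in the object's vtable word, as the SGEN_OBJECT_IS_FORWARDED check suggests).

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy object: a dedicated header field doubles as the forwarding pointer. */
typedef struct Obj {
    struct Obj *fwd;   /* NULL until the object has been evacuated */
    int payload;
} Obj;

static Obj to_space[16];
static size_t to_next;

/* Evacuate obj into to-space exactly once; later calls return the same copy. */
static Obj *copy_object(Obj *obj)
{
    if (obj->fwd)                         /* already forwarded: reuse the copy */
        return obj->fwd;
    Obj *copy = &to_space[to_next++];     /* bump-pointer allocation */
    memcpy(copy, obj, sizeof *copy);
    copy->fwd = NULL;
    obj->fwd = copy;                      /* install the forwarding pointer */
    return copy;
}

int main(void)
{
    Obj from = { NULL, 42 };
    Obj *slot1 = &from, *slot2 = &from;
    /* Two slots referencing the same object end up pointing at one copy. */
    slot1 = copy_object(slot1);
    slot2 = copy_object(slot2);
    assert(slot1 == slot2 && slot1->payload == 42);
    printf("both slots forwarded to %p\n", (void *)slot1);
    return 0;
}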
/** * \file * Copy functions for nursery collections. * * Copyright 2001-2003 Ximian, Inc * Copyright 2003-2010 Novell, Inc. * Copyright (C) 2012 Xamarin Inc * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #undef SERIAL_COPY_OBJECT #undef SERIAL_COPY_OBJECT_FROM_OBJ #if defined(SGEN_SIMPLE_NURSERY) #ifdef SGEN_SIMPLE_PAR_NURSERY #ifdef SGEN_CONCURRENT_MAJOR #define SERIAL_COPY_OBJECT simple_par_nursery_with_concurrent_major_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_par_nursery_with_concurrent_major_copy_object_from_obj #else #define SERIAL_COPY_OBJECT simple_par_nursery_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_par_nursery_copy_object_from_obj #endif #else #ifdef SGEN_CONCURRENT_MAJOR #define SERIAL_COPY_OBJECT simple_nursery_serial_with_concurrent_major_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_nursery_serial_with_concurrent_major_copy_object_from_obj #else #define SERIAL_COPY_OBJECT simple_nursery_serial_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ simple_nursery_serial_copy_object_from_obj #endif #endif #elif defined (SGEN_SPLIT_NURSERY) #ifdef SGEN_CONCURRENT_MAJOR #define SERIAL_COPY_OBJECT split_nursery_serial_with_concurrent_major_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ split_nursery_serial_with_concurrent_major_copy_object_from_obj #else #define SERIAL_COPY_OBJECT split_nursery_serial_copy_object #define SERIAL_COPY_OBJECT_FROM_OBJ split_nursery_serial_copy_object_from_obj #endif #else #error "No nursery configuration specified" #endif extern guint64 stat_nursery_copy_object_failed_to_space; /* from sgen-gc.c */ /* * This is how the copying happens from the nursery to the old generation. * We assume that at this time all the pinned objects have been identified and * marked as such. * We run scan_object() for each pinned object so that each referenced * object is copied if possible. The new gray objects created can have * scan_object() run on them right away, too. * Then we run copy_object() for the precisely tracked roots. At this point * all the roots are either gray or black. We run scan_object() on the gray * objects until no more gray objects are created. * At the end of the process we walk the pinned list again and unmark * the pinned flag. As we go we also create the list of free space for use * in the next allocation runs. * * We need to remember objects from the old generation that point to the new one * (or just addresses?). * * copy_object could be made into a macro once debugged (use inline for now). */ static MONO_ALWAYS_INLINE void SERIAL_COPY_OBJECT (GCObject **obj_slot, SgenGrayQueue *queue) { GCObject *forwarded; GCObject *copy; GCObject *obj = *obj_slot; SGEN_ASSERT (9, sgen_current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy from a %d generation collection", sgen_current_collection_generation); HEAVY_STAT (++stat_copy_object_called_nursery); if (!sgen_ptr_in_nursery (obj)) { HEAVY_STAT (++stat_nursery_copy_object_failed_from_space); return; } SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot); /* * Before we can copy the object we must make sure that we are * allowed to, i.e. that the object is not pinned, not already * forwarded, and does not belong to the nursery To Space. 
*/ if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) { SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded), "forwarded object %p has no gc descriptor", forwarded); SGEN_LOG (9, " (already forwarded to %p)", forwarded); HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded); SGEN_UPDATE_REFERENCE (obj_slot, forwarded); return; } if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) { SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj); SGEN_LOG (9, " (pinned, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_pinned); return; } #ifndef SGEN_SIMPLE_NURSERY if (sgen_nursery_is_to_space (obj)) { SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj); SGEN_LOG (9, " (tospace, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_to_space); return; } #endif HEAVY_STAT (++stat_objects_copied_nursery); #ifdef SGEN_SIMPLE_PAR_NURSERY copy = copy_object_no_checks_par (obj, queue); #else copy = copy_object_no_checks (obj, queue); #endif SGEN_UPDATE_REFERENCE (obj_slot, copy); } /* * SERIAL_COPY_OBJECT_FROM_OBJ: * * Similar to SERIAL_COPY_OBJECT, but assumes that OBJ_SLOT is part of an object, so it handles global remsets as well. */ static MONO_ALWAYS_INLINE void SERIAL_COPY_OBJECT_FROM_OBJ (GCObject **obj_slot, SgenGrayQueue *queue) { GCObject *forwarded; GCObject *obj = *obj_slot; GCObject *copy; SGEN_ASSERT (9, sgen_current_collection_generation == GENERATION_NURSERY, "calling minor-serial-copy-from-obj from a %d generation collection", sgen_current_collection_generation); HEAVY_STAT (++stat_copy_object_called_nursery); if (!sgen_ptr_in_nursery (obj)) { HEAVY_STAT (++stat_nursery_copy_object_failed_from_space); return; } SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot); /* * Before we can copy the object we must make sure that we are * allowed to, i.e. that the object is not pinned, not already * forwarded, and does not belong to the nursery To Space. */ if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) { SGEN_ASSERT (9, sgen_obj_get_descriptor (forwarded), "forwarded object %p has no gc descriptor", forwarded); SGEN_LOG (9, " (already forwarded to %p)", forwarded); HEAVY_STAT (++stat_nursery_copy_object_failed_forwarded); #ifdef SGEN_CONCURRENT_MAJOR /* See comment on STORE_STORE_FENCE below. */ STORE_STORE_FENCE; #endif SGEN_UPDATE_REFERENCE (obj_slot, forwarded); #ifndef SGEN_SIMPLE_NURSERY if (G_UNLIKELY (sgen_ptr_in_nursery (forwarded) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (forwarded))) sgen_add_to_global_remset (obj_slot, forwarded); #endif return; } if (G_UNLIKELY (SGEN_OBJECT_IS_PINNED (obj))) { SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "pinned object %p has no gc descriptor", obj); SGEN_LOG (9, " (pinned, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_pinned); if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj)) sgen_add_to_global_remset (obj_slot, obj); return; } #ifndef SGEN_SIMPLE_NURSERY if (sgen_nursery_is_to_space (obj)) { /* FIXME: all of these could just use `sgen_obj_get_descriptor_safe()` */ SGEN_ASSERT (9, sgen_vtable_get_descriptor (SGEN_LOAD_VTABLE(obj)), "to space object %p has no gc descriptor", obj); SGEN_LOG (9, " (tospace, no change)"); HEAVY_STAT (++stat_nursery_copy_object_failed_to_space); /* * FIXME: * * The card table scanning code sometimes clears cards * that have just been set for a global remset. 
In * the split nursery the following situation can * occur: * * Let's say object A starts in card C but continues * into C+1. Within A, at offset O there's a * reference to a new nursery object X. A+O is in * card C+1. Now card C is scanned, and as part of * it, object A. The reference at A+O is processed by * copying X into nursery to-space at Y. Since it's * still in the nursery, a global remset must be added * for A+O, so card C+1 is marked. Now, however, card * C+1 is scanned, which means that it's cleared * first. This wouldn't be terribly bad if reference * A+O were re-scanned and the global remset re-added, * but since the reference points to to-space, that * doesn't happen, and C+1 remains cleared: the remset * is lost. * * There are at least two ways to fix this. The easy * one is to re-add the remset on the re-scan, which is * what the following two lines of code do. * * The proper solution appears to be to first make a * copy of the cards before scanning a block, then to * clear all the cards and scan from the copy, so no * remsets will be overwritten. Scanning objects at * most once would be the icing on the cake. */ if (!sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (obj)) sgen_add_to_global_remset (obj_slot, obj); return; } #endif HEAVY_STAT (++stat_objects_copied_nursery); #ifdef SGEN_SIMPLE_PAR_NURSERY copy = copy_object_no_checks_par (obj, queue); #else copy = copy_object_no_checks (obj, queue); #endif #ifdef SGEN_CONCURRENT_MAJOR /* * If an object is evacuated to the major heap and a reference to it from the major * heap is updated, the concurrent major collector might follow that reference and * scan the new major object. To make sure the object contents are seen by the * major collector we need this write barrier, so that the reference is seen after * the object. */ STORE_STORE_FENCE; #endif SGEN_UPDATE_REFERENCE (obj_slot, copy); #ifndef SGEN_SIMPLE_NURSERY if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy))) sgen_add_to_global_remset (obj_slot, copy); #else /* copy_object_no_checks () can return obj on OOM */ if (G_UNLIKELY (obj == copy)) { if (G_UNLIKELY (sgen_ptr_in_nursery (copy) && !sgen_ptr_in_nursery (obj_slot) && !SGEN_OBJECT_IS_CEMENTED (copy))) sgen_add_to_global_remset (obj_slot, copy); } #endif }
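The STORE_STORE_FENCE in the routine above is a publication barrier: the copied object's contents must become visible to the concurrent major collector no later than the updated reference to it, or the collector could follow the reference and scan uninitialized memory. The sketch below restates that ordering requirement with portable C11 release/acquire atomics; the names are illustrative rather than SGen's, and the driver is single-threaded (the ordering only matters when publish() and scan() run on different threads).

#include <stdatomic.h>
#include <stdio.h>

typedef struct { int field; } Payload;

static Payload obj;                        /* stands in for the evacuated copy */
static _Atomic(Payload *) slot = NULL;     /* the reference being updated */

/* Collector side: fill in the object, then publish the pointer. The release
 * store plays the role of STORE_STORE_FENCE + SGEN_UPDATE_REFERENCE. */
static void publish(void)
{
    obj.field = 42;                                       /* contents first */
    atomic_store_explicit(&slot, &obj, memory_order_release);
}

/* Concurrent scanner: an acquire load that observes the pointer is also
 * guaranteed to observe the contents written before the release store. */
static void scan(void)
{
    Payload *p = atomic_load_explicit(&slot, memory_order_acquire);
    if (p)
        printf("scanned field = %d\n", p->field);   /* 42, never garbage */
}

int main(void)
{
    publish();
    scan();
    return 0;
}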
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/native/libs/System.Globalization.Native/pal_casing.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_locale.h" #include "pal_compiler.h" PALEXPORT void GlobalizationNative_ChangeCase(const UChar* lpSrc, int32_t cwSrcLength, UChar* lpDst, int32_t cwDstLength, int32_t bToUpper); PALEXPORT void GlobalizationNative_ChangeCaseInvariant(const UChar* lpSrc, int32_t cwSrcLength, UChar* lpDst, int32_t cwDstLength, int32_t bToUpper); PALEXPORT void GlobalizationNative_ChangeCaseTurkish(const UChar* lpSrc, int32_t cwSrcLength, UChar* lpDst, int32_t cwDstLength, int32_t bToUpper); PALEXPORT void GlobalizationNative_InitOrdinalCasingPage(int32_t pageNumber, UChar* pTarget);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "pal_locale.h" #include "pal_compiler.h" PALEXPORT void GlobalizationNative_ChangeCase(const UChar* lpSrc, int32_t cwSrcLength, UChar* lpDst, int32_t cwDstLength, int32_t bToUpper); PALEXPORT void GlobalizationNative_ChangeCaseInvariant(const UChar* lpSrc, int32_t cwSrcLength, UChar* lpDst, int32_t cwDstLength, int32_t bToUpper); PALEXPORT void GlobalizationNative_ChangeCaseTurkish(const UChar* lpSrc, int32_t cwSrcLength, UChar* lpDst, int32_t cwDstLength, int32_t bToUpper); PALEXPORT void GlobalizationNative_InitOrdinalCasingPage(int32_t pageNumber, UChar* pTarget);
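These entry points follow a common shape for native globalization shims: caller-allocated UTF-16 source and destination buffers with explicit lengths, plus an int32_t flag selecting upper- or lowercasing. The sketch below is a stand-in with the same parameter shape, not the real ICU-backed implementation: it handles ASCII only and assumes UChar is a 16-bit UTF-16 code unit.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t UChar;   /* stand-in for ICU's UChar (UTF-16 code unit) */

/* Same parameter shape as GlobalizationNative_ChangeCase; ASCII-only demo. */
static void ChangeCaseDemo(const UChar* lpSrc, int32_t cwSrcLength,
                           UChar* lpDst, int32_t cwDstLength, int32_t bToUpper)
{
    int32_t n = cwSrcLength < cwDstLength ? cwSrcLength : cwDstLength;
    for (int32_t i = 0; i < n; i++) {
        UChar c = lpSrc[i];
        if (bToUpper && c >= 'a' && c <= 'z')
            c = (UChar)(c - 'a' + 'A');
        else if (!bToUpper && c >= 'A' && c <= 'Z')
            c = (UChar)(c - 'A' + 'a');
        lpDst[i] = c;
    }
}

int main(void)
{
    UChar src[] = { 'p', 'a', 'l', '!' };
    UChar dst[4];
    ChangeCaseDemo(src, 4, dst, 4, /* bToUpper */ 1);
    for (int i = 0; i < 4; i++)
        putchar((char)dst[i]);    /* prints PAL! */
    putchar('\n');
    return 0;
}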
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/coreclr/pal/tests/palsuite/c_runtime/_wfopen/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests the PAL implementation of the _wfopen function. ** This test simply attempts to open a number of files with ** different modes. It checks to ensure a valid file ** pointer is returned. It doesn't do any checking to ** ensure the mode is really what it claims. ** ** **===================================================================*/ #define UNICODE #include <palsuite.h> struct testCase { int CorrectResult; WCHAR mode[20]; }; PALTEST(c_runtime__wfopen_test1_paltest_wfopen_test1, "c_runtime/_wfopen/test1/paltest_wfopen_test1") { FILE *fp; WCHAR name[128]; WCHAR base[] = {'t','e','s','t','f','i','l','e','s','\0'}; char * PrintResult; int i; struct testCase testCases[] = { {0, {'r','\0' }}, {1, {'w','\0'}}, {1, {'a','\0'}}, {0, {'r','+','\0'}}, {1, {'w','+','\0'}}, {1, {'a','+','\0'}}, {1, {'w','t','\0'}}, {1, {'w','b','\0'}}, {1, {'w','S','\0'}}, {1, {'w','c','\0'}}, {1, {'w','n','\0'}}, {1, {'w', 'R','\0'}}, {1, {'w','T','\0'}}, {0, {'t','w','\0'}}, {0, {'.','\0'}} }; if (PAL_Initialize(argc, argv)) { return FAIL; } for(i = 0; i < sizeof(testCases) / sizeof(struct testCase); i++) { wcscpy(name,base); wcscat(name,testCases[i].mode); fp = _wfopen(name,testCases[i].mode); if ((fp == 0 && testCases[i].CorrectResult != 0) || (testCases[i].CorrectResult == 0 && fp != 0) ) { PrintResult = convertC(testCases[i].mode); Fail("ERROR: _wfopen returned incorrectly when " "opening a file in %s mode. Perhaps it opened a " "read-only file which didn't exist and returned a correct " "pointer?",PrintResult); free(PrintResult); } memset(name, '\0', 128 * sizeof(name[0])); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test1.c ** ** Purpose: Tests the PAL implementation of the _wfopen function. ** This test simply attempts to open a number of files with ** different modes. It checks to ensure a valid file ** pointer is returned. It doesn't do any checking to ** ensure the mode is really what it claims. ** ** **===================================================================*/ #define UNICODE #include <palsuite.h> struct testCase { int CorrectResult; WCHAR mode[20]; }; PALTEST(c_runtime__wfopen_test1_paltest_wfopen_test1, "c_runtime/_wfopen/test1/paltest_wfopen_test1") { FILE *fp; WCHAR name[128]; WCHAR base[] = {'t','e','s','t','f','i','l','e','s','\0'}; char * PrintResult; int i; struct testCase testCases[] = { {0, {'r','\0' }}, {1, {'w','\0'}}, {1, {'a','\0'}}, {0, {'r','+','\0'}}, {1, {'w','+','\0'}}, {1, {'a','+','\0'}}, {1, {'w','t','\0'}}, {1, {'w','b','\0'}}, {1, {'w','S','\0'}}, {1, {'w','c','\0'}}, {1, {'w','n','\0'}}, {1, {'w', 'R','\0'}}, {1, {'w','T','\0'}}, {0, {'t','w','\0'}}, {0, {'.','\0'}} }; if (PAL_Initialize(argc, argv)) { return FAIL; } for(i = 0; i < sizeof(testCases) / sizeof(struct testCase); i++) { wcscpy(name,base); wcscat(name,testCases[i].mode); fp = _wfopen(name,testCases[i].mode); if ((fp == 0 && testCases[i].CorrectResult != 0) || (testCases[i].CorrectResult == 0 && fp != 0) ) { PrintResult = convertC(testCases[i].mode); Fail("ERROR: _wfopen returned incorrectly when " "opening a file in %s mode. Perhaps it opened a " "read-only file which didn't exist and returned a correct " "pointer?",PrintResult); free(PrintResult); } memset(name, '\0', 128 * sizeof(name[0])); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/tests/BuildWasmApps/testassets/AppUsingNativeLib/native-lib.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "native-lib.h" #include <stdio.h> int print_line(int x) { printf("print_line: %d\n", x); return 42 + x; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "native-lib.h" #include <stdio.h> int print_line(int x) { printf("print_line: %d\n", x); return 42 + x; }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/coreclr/pal/tests/palsuite/file_io/GetTempFileNameW/test2/GetTempFileNameW.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: GetTempFileNameW.c (test 2) ** ** Purpose: Tests the PAL implementation of the GetTempFileNameW function. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_GetTempFileNameW_test2_paltest_gettempfilenamew_test2, "file_io/GetTempFileNameW/test2/paltest_gettempfilenamew_test2") { UINT uiError = 0; DWORD dwError = 0; const UINT uUnique = 0; WCHAR* wPrefix = NULL; WCHAR* wPath = NULL; WCHAR wReturnedName[256]; DWORD i; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } // test the number of temp files that can be created wPrefix = convert("cfr"); wPath = convert("."); for (i = 0; i < 0x10005; i++) { uiError = GetTempFileNameW(wPath, wPrefix, uUnique, wReturnedName); if (uiError == 0) { dwError = GetLastError(); if (dwError == ERROR_FILE_EXISTS) { // file already exists so break out of the loop i--; // decrement the count because it wasn't successful break; } else { // it was something other than the file already existing? free (wPath); free (wPrefix); Fail("GetTempFileNameW: ERROR -> Call failed with a valid " "path and prefix with the error code: %ld\n", GetLastError()); } } else { // verify temp file was created if (GetFileAttributesW(wReturnedName) == -1) { free (wPath); free (wPrefix); Fail("GetTempFileNameW: ERROR -> GetFileAttributes failed " "on the returned temp file with error code: %ld.\n", GetLastError()); } } } free (wPath); free (wPrefix); // did it create more than 0xffff files? if (i > 0xffff) { Fail("GetTempFileNameW: ERROR -> Was able to create more than 0xffff" " temp files.\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: GetTempFileNameW.c (test 2) ** ** Purpose: Tests the PAL implementation of the GetTempFileNameW function. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_GetTempFileNameW_test2_paltest_gettempfilenamew_test2, "file_io/GetTempFileNameW/test2/paltest_gettempfilenamew_test2") { UINT uiError = 0; DWORD dwError = 0; const UINT uUnique = 0; WCHAR* wPrefix = NULL; WCHAR* wPath = NULL; WCHAR wReturnedName[256]; DWORD i; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } // test the number of temp files that can be created wPrefix = convert("cfr"); wPath = convert("."); for (i = 0; i < 0x10005; i++) { uiError = GetTempFileNameW(wPath, wPrefix, uUnique, wReturnedName); if (uiError == 0) { dwError = GetLastError(); if (dwError == ERROR_FILE_EXISTS) { // file already exists so break out of the loop i--; // decrement the count because it wasn't successful break; } else { // it was something other than the file already existing? free (wPath); free (wPrefix); Fail("GetTempFileNameW: ERROR -> Call failed with a valid " "path and prefix with the error code: %ld\n", GetLastError()); } } else { // verify temp file was created if (GetFileAttributesW(wReturnedName) == -1) { free (wPath); free (wPrefix); Fail("GetTempFileNameW: ERROR -> GetFileAttributes failed " "on the returned temp file with error code: %ld.\n", GetLastError()); } } } free (wPath); free (wPrefix); // did it create more than 0xffff files? if (i > 0xffff) { Fail("GetTempFileNameW: ERROR -> Was able to create more than 0xffff" " temp files.\n"); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/coreclr/nativeaot/Runtime/rheventtrace.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This header provides Redhawk-specific ETW code and macros, to allow sharing of common // ETW code between Redhawk and desktop CLR. // #ifndef __RHEVENTTRACE_INCLUDED #define __RHEVENTTRACE_INCLUDED #ifdef FEATURE_ETW // FireEtwGCPerHeapHistorySpecial() has to be defined manually rather than via the manifest because it does // not have a standard signature. #define FireEtwGCPerHeapHistorySpecial(DataPerHeap, DataSize, ClrId) (MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_Redhawk_GC_PrivateHandle, &GCPerHeapHistory)) ? Template_GCPerHeapHistorySpecial(Microsoft_Windows_Redhawk_GC_PrivateHandle, &GCPerHeapHistory, DataPerHeap, DataSize, ClrId) : 0 // Map the CLR private provider to our version so we can avoid inserting more #ifdef's in the code. #define MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context #define MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context MICROSOFT_WINDOWS_REDHAWK_GC_PUBLIC_PROVIDER_Context #define Microsoft_Windows_DotNETRuntimeHandle Microsoft_Windows_Redhawk_GC_PublicHandle #define CLR_GC_KEYWORD 0x1 #define CLR_FUSION_KEYWORD 0x4 #define CLR_LOADER_KEYWORD 0x8 #define CLR_JIT_KEYWORD 0x10 #define CLR_NGEN_KEYWORD 0x20 #define CLR_STARTENUMERATION_KEYWORD 0x40 #define CLR_ENDENUMERATION_KEYWORD 0x80 #define CLR_SECURITY_KEYWORD 0x400 #define CLR_APPDOMAINRESOURCEMANAGEMENT_KEYWORD 0x800 #define CLR_JITTRACING_KEYWORD 0x1000 #define CLR_INTEROP_KEYWORD 0x2000 #define CLR_CONTENTION_KEYWORD 0x4000 #define CLR_EXCEPTION_KEYWORD 0x8000 #define CLR_THREADING_KEYWORD 0x10000 #define CLR_JITTEDMETHODILTONATIVEMAP_KEYWORD 0x20000 #define CLR_OVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD 0x40000 #define CLR_TYPE_KEYWORD 0x80000 #define CLR_GCHEAPDUMP_KEYWORD 0x100000 #define CLR_GCHEAPALLOC_KEYWORD 0x200000 #define CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD 0x400000 #define CLR_GCHEAPCOLLECT_KEYWORD 0x800000 #define CLR_GCHEAPANDTYPENAMES_KEYWORD 0x1000000 #define CLR_PERFTRACK_KEYWORD 0x20000000 #define CLR_STACK_KEYWORD 0x40000000 #ifndef ERROR_SUCCESS #define ERROR_SUCCESS 0 #endif #undef ETW_TRACING_INITIALIZED #define ETW_TRACING_INITIALIZED(RegHandle) (RegHandle != NULL) #undef ETW_CATEGORY_ENABLED #define ETW_CATEGORY_ENABLED(Context, LevelParam, Keyword) \ (Context.IsEnabled && \ ( \ (LevelParam <= ((Context).Level)) || \ ((Context.Level) == 0) \ ) && \ ( \ (Keyword == (ULONGLONG)0) || \ ( \ (Keyword & (Context.MatchAnyKeyword)) && \ ( \ (Keyword & (Context.MatchAllKeyword)) == (Context.MatchAllKeyword) \ ) \ ) \ ) \ ) class MethodTable; class BulkTypeEventLogger; namespace ETW { // Class to wrap all type system logic for ETW class TypeSystemLog { public: // This enum is unused on Redhawk, but remains here to keep Redhawk / desktop CLR // code shareable. enum TypeLogBehavior { kTypeLogBehaviorTakeLockAndLogIfFirstTime, kTypeLogBehaviorAssumeLockAndLogIfFirstTime, kTypeLogBehaviorAlwaysLog, }; static void LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pLogger, uint64_t thAsAddr, TypeLogBehavior typeLogBehavior); }; }; #else #define FireEtwGCPerHeapHistorySpecial(DataPerHeap, DataSize, ClrId) #endif #endif //__RHEVENTTRACE_INCLUDED
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // This header provides Redhawk-specific ETW code and macros, to allow sharing of common // ETW code between Redhawk and desktop CLR. // #ifndef __RHEVENTTRACE_INCLUDED #define __RHEVENTTRACE_INCLUDED #ifdef FEATURE_ETW // FireEtwGCPerHeapHistorySpecial() has to be defined manually rather than via the manifest because it does // not have a standard signature. #define FireEtwGCPerHeapHistorySpecial(DataPerHeap, DataSize, ClrId) (MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_Redhawk_GC_PrivateHandle, &GCPerHeapHistory)) ? Template_GCPerHeapHistorySpecial(Microsoft_Windows_Redhawk_GC_PrivateHandle, &GCPerHeapHistory, DataPerHeap, DataSize, ClrId) : 0 // Map the CLR private provider to our version so we can avoid inserting more #ifdef's in the code. #define MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context MICROSOFT_WINDOWS_REDHAWK_GC_PRIVATE_PROVIDER_Context #define MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context MICROSOFT_WINDOWS_REDHAWK_GC_PUBLIC_PROVIDER_Context #define Microsoft_Windows_DotNETRuntimeHandle Microsoft_Windows_Redhawk_GC_PublicHandle #define CLR_GC_KEYWORD 0x1 #define CLR_FUSION_KEYWORD 0x4 #define CLR_LOADER_KEYWORD 0x8 #define CLR_JIT_KEYWORD 0x10 #define CLR_NGEN_KEYWORD 0x20 #define CLR_STARTENUMERATION_KEYWORD 0x40 #define CLR_ENDENUMERATION_KEYWORD 0x80 #define CLR_SECURITY_KEYWORD 0x400 #define CLR_APPDOMAINRESOURCEMANAGEMENT_KEYWORD 0x800 #define CLR_JITTRACING_KEYWORD 0x1000 #define CLR_INTEROP_KEYWORD 0x2000 #define CLR_CONTENTION_KEYWORD 0x4000 #define CLR_EXCEPTION_KEYWORD 0x8000 #define CLR_THREADING_KEYWORD 0x10000 #define CLR_JITTEDMETHODILTONATIVEMAP_KEYWORD 0x20000 #define CLR_OVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD 0x40000 #define CLR_TYPE_KEYWORD 0x80000 #define CLR_GCHEAPDUMP_KEYWORD 0x100000 #define CLR_GCHEAPALLOC_KEYWORD 0x200000 #define CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD 0x400000 #define CLR_GCHEAPCOLLECT_KEYWORD 0x800000 #define CLR_GCHEAPANDTYPENAMES_KEYWORD 0x1000000 #define CLR_PERFTRACK_KEYWORD 0x20000000 #define CLR_STACK_KEYWORD 0x40000000 #ifndef ERROR_SUCCESS #define ERROR_SUCCESS 0 #endif #undef ETW_TRACING_INITIALIZED #define ETW_TRACING_INITIALIZED(RegHandle) (RegHandle != NULL) #undef ETW_CATEGORY_ENABLED #define ETW_CATEGORY_ENABLED(Context, LevelParam, Keyword) \ (Context.IsEnabled && \ ( \ (LevelParam <= ((Context).Level)) || \ ((Context.Level) == 0) \ ) && \ ( \ (Keyword == (ULONGLONG)0) || \ ( \ (Keyword & (Context.MatchAnyKeyword)) && \ ( \ (Keyword & (Context.MatchAllKeyword)) == (Context.MatchAllKeyword) \ ) \ ) \ ) \ ) class MethodTable; class BulkTypeEventLogger; namespace ETW { // Class to wrap all type system logic for ETW class TypeSystemLog { public: // This enum is unused on Redhawk, but remains here to keep Redhawk / desktop CLR // code shareable. enum TypeLogBehavior { kTypeLogBehaviorTakeLockAndLogIfFirstTime, kTypeLogBehaviorAssumeLockAndLogIfFirstTime, kTypeLogBehaviorAlwaysLog, }; static void LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pLogger, uint64_t thAsAddr, TypeLogBehavior typeLogBehavior); }; }; #else #define FireEtwGCPerHeapHistorySpecial(DataPerHeap, DataSize, ClrId) #endif #endif //__RHEVENTTRACE_INCLUDED
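Pulled out of macro form, ETW_CATEGORY_ENABLED above is a three-part predicate: the provider must be enabled, the event level must pass (a context level of 0 means "all levels"), and the keyword mask must pass (keyword 0 always matches; otherwise the keyword must intersect MatchAnyKeyword and contain all of MatchAllKeyword). Below is the same logic restated as a standalone C function, with made-up context values for the demo; the keyword constants reused in main (CLR_GC_KEYWORD = 0x1, CLR_JIT_KEYWORD = 0x10) come from the header itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    bool     IsEnabled;
    uint64_t MatchAnyKeyword;
    uint64_t MatchAllKeyword;
    uint8_t  Level;
} TraceContext;

/* Function form of the ETW_CATEGORY_ENABLED macro's condition. */
static bool category_enabled(const TraceContext *ctx, uint8_t level, uint64_t keyword)
{
    if (!ctx->IsEnabled)
        return false;
    if (!(level <= ctx->Level || ctx->Level == 0))
        return false;
    if (keyword == 0)
        return true;
    return (keyword & ctx->MatchAnyKeyword) != 0
        && (keyword & ctx->MatchAllKeyword) == ctx->MatchAllKeyword;
}

int main(void)
{
    /* A listener asking for GC events (keyword 0x1) at level <= 4. */
    TraceContext ctx = { true, /* any */ 0x1, /* all */ 0x0, /* level */ 4 };
    printf("GC at level 4:  %d\n", category_enabled(&ctx, 4, 0x1));   /* 1 */
    printf("JIT at level 4: %d\n", category_enabled(&ctx, 4, 0x10));  /* 0 */
    printf("GC at level 5:  %d\n", category_enabled(&ctx, 5, 0x1));   /* 0 */
    return 0;
}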
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/coreclr/debug/di/stdafx.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // stdafx.h // // // Common include file for utility code. //***************************************************************************** #include <stdio.h> #include <windows.h> #include <winnt.h> #include <dbgtargetcontext.h> #define RIGHT_SIDE_COMPILE //----------------------------------------------------------------------------- // Contracts for RS threading. // We only do this for debug builds and not for inproc //----------------------------------------------------------------------------- #if defined(_DEBUG) #define RSCONTRACTS #endif // In the FEATURE_DBGIPC_TRANSPORT_DI case we use a pipe for debugger-debuggee communication // and event redirection is not needed. (won't work anyway) #ifndef FEATURE_DBGIPC_TRANSPORT_DI // Currently, we can only redirect exception events. Since real interop-debugging // needs all events, redirection can't work in real-interop. // However, whether we're interop-debugging is determined at runtime, so we always // enable at compile time and then we need a runtime check later. #define ENABLE_EVENT_REDIRECTION_PIPELINE #endif #include "ex.h" #include "sigparser.h" #include "corpub.h" #include "rspriv.h" // This is included to deal with GCC limitations around templates. // For GCC, if a compilation unit refers to a templated class (like Ptr<T>), GCC requires the compilation // unit to have T's definitions for anything that Ptr may call. // RsPriv.h has a RSExtSmartPtr<ShimProcess>, which will call ShimProcess::AddRef, which means the same compilation unit // must have the definition of ShimProcess::AddRef, and therefore the whole ShimProcess class. // CL.exe does not have this problem. // Practically, this means that anybody that includes rspriv.h must include shimpriv.h. #include "shimpriv.h" #ifdef _DEBUG #include "utilcode.h" #endif #ifndef TARGET_ARM #define DbiGetThreadContext(hThread, lpContext) ::GetThreadContext(hThread, (CONTEXT*)(lpContext)) #define DbiSetThreadContext(hThread, lpContext) ::SetThreadContext(hThread, (CONTEXT*)(lpContext)) #else BOOL DbiGetThreadContext(HANDLE hThread, DT_CONTEXT *lpContext); BOOL DbiSetThreadContext(HANDLE hThread, const DT_CONTEXT *lpContext); #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // stdafx.h // // // Common include file for utility code. //***************************************************************************** #include <stdio.h> #include <windows.h> #include <winnt.h> #include <dbgtargetcontext.h> #define RIGHT_SIDE_COMPILE //----------------------------------------------------------------------------- // Contracts for RS threading. // We only do this for debug builds and not for inproc //----------------------------------------------------------------------------- #if defined(_DEBUG) #define RSCONTRACTS #endif // In the FEATURE_DBGIPC_TRANSPORT_DI case we use a pipe for debugger-debuggee communication // and event redirection is not needed. (won't work anyway) #ifndef FEATURE_DBGIPC_TRANSPORT_DI // Currently, we can only redirect exception events. Since real interop-debugging // needs all events, redirection can't work in real-interop. // However, whether we're interop-debugging is determined at runtime, so we always // enable at compile time and then we need a runtime check later. #define ENABLE_EVENT_REDIRECTION_PIPELINE #endif #include "ex.h" #include "sigparser.h" #include "corpub.h" #include "rspriv.h" // This is included to deal with GCC limitations around templates. // For GCC, if a compilation unit refers to a templated class (like Ptr<T>), GCC requires the compilation // unit to have T's definitions for anything that Ptr may call. // RsPriv.h has a RSExtSmartPtr<ShimProcess>, which will call ShimProcess::AddRef, which means the same compilation unit // must have the definition of ShimProcess::AddRef, and therefore the whole ShimProcess class. // CL.exe does not have this problem. // Practically, this means that anybody that includes rspriv.h must include shimpriv.h. #include "shimpriv.h" #ifdef _DEBUG #include "utilcode.h" #endif #ifndef TARGET_ARM #define DbiGetThreadContext(hThread, lpContext) ::GetThreadContext(hThread, (CONTEXT*)(lpContext)) #define DbiSetThreadContext(hThread, lpContext) ::SetThreadContext(hThread, (CONTEXT*)(lpContext)) #else BOOL DbiGetThreadContext(HANDLE hThread, DT_CONTEXT *lpContext); BOOL DbiSetThreadContext(HANDLE hThread, const DT_CONTEXT *lpContext); #endif
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Not use fast tail call when callee use split struct argument - Not use fast tail call when callee use non-standard calling convention - Not use fast tail call when it overwrites stack which will be passed to callee.
./src/coreclr/utilcode/sstring.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // --------------------------------------------------------------------------- // SString.cpp // // --------------------------------------------------------------------------- #include "stdafx.h" #include "sstring.h" #include "ex.h" #include "holder.h" #if defined(_MSC_VER) #pragma inline_depth (25) #endif //----------------------------------------------------------------------------- // Static variables //----------------------------------------------------------------------------- // Have one internal, well-known, literal for the empty string. const BYTE SString::s_EmptyBuffer[2] = { 0 }; // @todo: these need to be initialized by calling GetACP() UINT SString::s_ACP = 0; #ifndef DACCESS_COMPILE static BYTE s_EmptySpace[sizeof(SString)] = { 0 }; #endif // DACCESS_COMPILE SPTR_IMPL(SString,SString,s_Empty); void SString::Startup() { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; if (s_ACP == 0) { UINT ACP = GetACP(); #ifndef DACCESS_COMPILE s_Empty = PTR_SString(new (s_EmptySpace) SString()); s_Empty->SetNormalized(); #endif // DACCESS_COMPILE MemoryBarrier(); s_ACP = ACP; } } CHECK SString::CheckStartup() { WRAPPER_NO_CONTRACT; CHECK(s_Empty != NULL); CHECK_OK; } //----------------------------------------------------------------------------- // Case insensitive helpers. //----------------------------------------------------------------------------- static WCHAR MapChar(WCHAR wc, DWORD dwFlags) { WRAPPER_NO_CONTRACT; WCHAR wTmp; #ifndef TARGET_UNIX int iRet = ::LCMapStringEx(LOCALE_NAME_INVARIANT, dwFlags, &wc, 1, &wTmp, 1, NULL, NULL, 0); if (!iRet) { // This can fail in non-exceptional cases becauseof unknown unicode characters. wTmp = wc; } #else // !TARGET_UNIX // For PAL, no locale specific processing is done if (dwFlags == LCMAP_UPPERCASE) { wTmp = #ifdef SELF_NO_HOST toupper(wc); #else PAL_ToUpperInvariant(wc); #endif } else { _ASSERTE(dwFlags == LCMAP_LOWERCASE); wTmp = #ifdef SELF_NO_HOST tolower(wc); #else PAL_ToLowerInvariant(wc); #endif } #endif // !TARGET_UNIX return wTmp; } #define IS_UPPER_A_TO_Z(x) (((x) >= W('A')) && ((x) <= W('Z'))) #define IS_LOWER_A_TO_Z(x) (((x) >= W('a')) && ((x) <= W('z'))) #define CAN_SIMPLE_UPCASE(x) (((x)&~0x7f) == 0) #define CAN_SIMPLE_DOWNCASE(x) (((x)&~0x7f) == 0) #define SIMPLE_UPCASE(x) (IS_LOWER_A_TO_Z(x) ? ((x) - W('a') + W('A')) : (x)) #define SIMPLE_DOWNCASE(x) (IS_UPPER_A_TO_Z(x) ? ((x) - W('A') + W('a')) : (x)) /* static */ int SString::CaseCompareHelper(const WCHAR *buffer1, const WCHAR *buffer2, COUNT_T count, BOOL stopOnNull, BOOL stopOnCount) { LIMITED_METHOD_CONTRACT; _ASSERTE(stopOnNull || stopOnCount); const WCHAR *buffer1End = buffer1 + count; int diff = 0; while (!stopOnCount || (buffer1 < buffer1End)) { WCHAR ch1 = *buffer1++; WCHAR ch2 = *buffer2++; diff = ch1 - ch2; if ((ch1 == 0) || (ch2 == 0)) { if (diff != 0 || stopOnNull) { break; } } else { if (diff != 0) { diff = ((CAN_SIMPLE_UPCASE(ch1) ? SIMPLE_UPCASE(ch1) : MapChar(ch1, LCMAP_UPPERCASE)) - (CAN_SIMPLE_UPCASE(ch2) ? SIMPLE_UPCASE(ch2) : MapChar(ch2, LCMAP_UPPERCASE))); } if (diff != 0) { break; } } } return diff; } #define IS_LOWER_A_TO_Z_ANSI(x) (((x) >= 'a') && ((x) <= 'z')) #define CAN_SIMPLE_UPCASE_ANSI(x) (((x) >= 0x20) && ((x) <= 0x7f)) #define SIMPLE_UPCASE_ANSI(x) (IS_LOWER_A_TO_Z(x) ? 
((x) - 'a' + 'A') : (x)) /* static */ int SString::CaseCompareHelperA(const CHAR *buffer1, const CHAR *buffer2, COUNT_T count, BOOL stopOnNull, BOOL stopOnCount) { LIMITED_METHOD_CONTRACT; _ASSERTE(stopOnNull || stopOnCount); const CHAR *buffer1End = buffer1 + count; int diff = 0; while (!stopOnCount || (buffer1 < buffer1End)) { CHAR ch1 = *buffer1; CHAR ch2 = *buffer2; diff = ch1 - ch2; if (diff != 0 || stopOnNull) { if (ch1 == 0 || ch2 == 0) { break; } diff = (SIMPLE_UPCASE_ANSI(ch1) - SIMPLE_UPCASE_ANSI(ch2)); if (diff != 0) { break; } } buffer1++; buffer2++; } return diff; } int CaseHashHelper(const WCHAR *buffer, COUNT_T count) { LIMITED_METHOD_CONTRACT; const WCHAR *bufferEnd = buffer + count; ULONG hash = 5381; while (buffer < bufferEnd) { WCHAR ch = *buffer++; ch = CAN_SIMPLE_UPCASE(ch) ? SIMPLE_UPCASE(ch) : MapChar(ch, LCMAP_UPPERCASE); hash = (((hash << 5) + hash) ^ ch); } return hash; } static int CaseHashHelperA(const CHAR *buffer, COUNT_T count) { LIMITED_METHOD_CONTRACT; const CHAR *bufferEnd = buffer + count; ULONG hash = 5381; while (buffer < bufferEnd) { CHAR ch = *buffer++; ch = SIMPLE_UPCASE_ANSI(ch); hash = (((hash << 5) + hash) ^ ch); } return hash; } //----------------------------------------------------------------------------- // Set this string to a copy of the unicode string //----------------------------------------------------------------------------- void SString::Set(const WCHAR *string) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) wcslen(string), REPRESENTATION_UNICODE); wcscpy_s(GetRawUnicode(), GetBufferSizeInCharIncludeNullChar(), string); } RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // unicode string. //----------------------------------------------------------------------------- void SString::Set(const WCHAR *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_UNICODE); wcsncpy_s(GetRawUnicode(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawUnicode()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a point to the first count characters of the given // preallocated unicode string (shallow copy). 
//----------------------------------------------------------------------------- void SString::SetPreallocated(const WCHAR *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); SS_POSTCONDITION(IsEmpty()); GC_NOTRIGGER; NOTHROW; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; SetImmutable(); SetImmutable((BYTE*) string, count*2); ClearAllocated(); SetRepresentation(REPRESENTATION_UNICODE); SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the given ansi string //----------------------------------------------------------------------------- void SString::SetASCII(const ASCII *string) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckASCIIString(string)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) strlen(string), REPRESENTATION_ASCII); strcpy_s(GetRawUTF8(), GetBufferSizeInCharIncludeNullChar(), string); } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // ascii string //----------------------------------------------------------------------------- void SString::SetASCII(const ASCII *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckASCIIString(string, count)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_ASCII); strncpy_s(GetRawASCII(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawASCII()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the given UTF8 string //----------------------------------------------------------------------------- void SString::SetUTF8(const UTF8 *string) { SS_CONTRACT_VOID { // !!! Check for illegal UTF8 encoding? INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) strlen(string), REPRESENTATION_UTF8); strcpy_s(GetRawUTF8(), GetBufferSizeInCharIncludeNullChar(), string); } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // UTF8 string. //----------------------------------------------------------------------------- void SString::SetUTF8(const UTF8 *string, COUNT_T count) { SS_CONTRACT_VOID { // !!! Check for illegal UTF8 encoding? 
INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_UTF8); strncpy_s(GetRawUTF8(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawUTF8()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the given ANSI string //----------------------------------------------------------------------------- void SString::SetANSI(const ANSI *string) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) strlen(string), REPRESENTATION_ANSI); strcpy_s(GetRawANSI(), GetBufferSizeInCharIncludeNullChar(), string); } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // ANSI string. //----------------------------------------------------------------------------- void SString::SetANSI(const ANSI *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_ANSI); strncpy_s(GetRawANSI(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawANSI()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given unicode character //----------------------------------------------------------------------------- void SString::Set(WCHAR character) { SS_CONTRACT_VOID { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; if (character == 0) Clear(); else { Resize(1, REPRESENTATION_UNICODE); GetRawUnicode()[0] = character; GetRawUnicode()[1] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given UTF8 character //----------------------------------------------------------------------------- void SString::SetUTF8(CHAR character) { SS_CONTRACT_VOID { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (character == 0) Clear(); else { Resize(1, REPRESENTATION_UTF8); GetRawUTF8()[0] = character; GetRawUTF8()[1] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given ansi literal. // This will share the memory and not make a copy. //----------------------------------------------------------------------------- void SString::SetLiteral(const ASCII *literal) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(literal)); PRECONDITION(CheckASCIIString(literal)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; SString s(Literal, literal); Set(s); SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given unicode literal. // This will share the memory and not make a copy. 
//----------------------------------------------------------------------------- void SString::SetLiteral(const WCHAR *literal) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(literal)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; SString s(Literal, literal); Set(s); SS_RETURN; } //----------------------------------------------------------------------------- // Hash the string contents //----------------------------------------------------------------------------- ULONG SString::Hash() const { SS_CONTRACT(ULONG) { INSTANCE_CHECK; THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } SS_CONTRACT_END; ConvertToUnicode(); SS_RETURN HashString(GetRawUnicode()); } //----------------------------------------------------------------------------- // Hash the string contents //----------------------------------------------------------------------------- ULONG SString::HashCaseInsensitive() const { SS_CONTRACT(ULONG) { INSTANCE_CHECK; THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } SS_CONTRACT_END; ConvertToIteratable(); ULONG result; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_EMPTY: result = CaseHashHelper(GetRawUnicode(), GetRawCount()); break; case REPRESENTATION_ASCII: result = CaseHashHelperA(GetRawASCII(), GetRawCount()); break; default: UNREACHABLE(); } SS_RETURN result; } //----------------------------------------------------------------------------- // Truncate this string to count characters. //----------------------------------------------------------------------------- void SString::Truncate(const Iterator &i) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); SS_POSTCONDITION(GetRawCount() == i - Begin()); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; CONSISTENCY_CHECK(IsFixedSize()); COUNT_T size = i - Begin(); Resize(size, GetRepresentation(), PRESERVE); i.Resync(this, (BYTE *) (GetRawUnicode() + size)); SS_RETURN; } //----------------------------------------------------------------------------- // Convert the ASCII representation for this String to Unicode. We can do this // quickly and in-place (if this == &dest), which is why it is optimized. //----------------------------------------------------------------------------- void SString::ConvertASCIIToUnicode(SString &dest) const { CONTRACT_VOID { PRECONDITION(IsRepresentation(REPRESENTATION_ASCII)); POSTCONDITION(dest.IsRepresentation(REPRESENTATION_UNICODE)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; // Handle the empty case. if (IsEmpty()) { dest.Clear(); RETURN; } CONSISTENCY_CHECK(CheckPointer(GetRawASCII())); CONSISTENCY_CHECK(GetRawCount() > 0); // If dest is the same as this, then we need to preserve on resize. dest.Resize(GetRawCount(), REPRESENTATION_UNICODE, this == &dest ? PRESERVE : DONT_PRESERVE); // Make sure the buffer is big enough. CONSISTENCY_CHECK(dest.GetAllocation() > (GetRawCount() * sizeof(WCHAR))); // This is a poor man's widen. Since we know that the representation is ASCII, // we can just pad the string with a bunch of zero-value bytes. Of course, // we move from the end of the string to the start so that we can convert in // place (in the case that &dest == this). WCHAR *outBuf = dest.GetRawUnicode() + dest.GetRawCount(); ASCII *inBuf = GetRawASCII() + GetRawCount(); while (GetRawASCII() <= inBuf) { CONSISTENCY_CHECK(dest.GetRawUnicode() <= outBuf); // The casting zero-extends the value, thus giving us the zero-valued byte. 
*outBuf = (WCHAR) *inBuf; outBuf--; inBuf--; } RETURN; } //----------------------------------------------------------------------------- // Convert the internal representation for this String to Unicode. //----------------------------------------------------------------------------- void SString::ConvertToUnicode() const { CONTRACT_VOID { POSTCONDITION(IsRepresentation(REPRESENTATION_UNICODE)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; if (!IsRepresentation(REPRESENTATION_UNICODE)) { if (IsRepresentation(REPRESENTATION_ASCII)) { ConvertASCIIToUnicode(*(const_cast<SString *>(this))); } else { StackSString s; ConvertToUnicode(s); PREFIX_ASSUME(!s.IsImmutable()); (const_cast<SString*>(this))->Set(s); } } RETURN; } //----------------------------------------------------------------------------- // Convert the internal representation for this String to Unicode, while // preserving the iterator if the conversion is done. //----------------------------------------------------------------------------- void SString::ConvertToUnicode(const CIterator &i) const { CONTRACT_VOID { PRECONDITION(i.Check()); POSTCONDITION(IsRepresentation(REPRESENTATION_UNICODE)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; if (!IsRepresentation(REPRESENTATION_UNICODE)) { CONSISTENCY_CHECK(IsFixedSize()); COUNT_T index = 0; // Get the current index of the iterator if (i.m_ptr != NULL) { CONSISTENCY_CHECK(GetCharacterSizeShift() == 0); index = (COUNT_T) (i.m_ptr - m_buffer); } if (IsRepresentation(REPRESENTATION_ASCII)) { ConvertASCIIToUnicode(*(const_cast<SString *>(this))); } else { StackSString s; ConvertToUnicode(s); (const_cast<SString*>(this))->Set(s); } // Move the iterator to the new location. if (i.m_ptr != NULL) { i.Resync(this, (BYTE *) (GetRawUnicode() + index)); } } RETURN; } //----------------------------------------------------------------------------- // Set s to be a copy of this string's contents, but in the unicode format. //----------------------------------------------------------------------------- void SString::ConvertToUnicode(SString &s) const { CONTRACT_VOID { PRECONDITION(s.Check()); POSTCONDITION(s.IsRepresentation(REPRESENTATION_UNICODE)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; int page = 0; switch (GetRepresentation()) { case REPRESENTATION_EMPTY: s.Clear(); RETURN; case REPRESENTATION_UNICODE: s.Set(*this); RETURN; case REPRESENTATION_UTF8: page = CP_UTF8; break; case REPRESENTATION_ASCII: ConvertASCIIToUnicode(s); RETURN; case REPRESENTATION_ANSI: page = CP_ACP; break; default: UNREACHABLE(); } COUNT_T length = WszMultiByteToWideChar(page, 0, GetRawANSI(), GetRawCount()+1, 0, 0); if (length == 0) ThrowLastError(); s.Resize(length-1, REPRESENTATION_UNICODE); length = WszMultiByteToWideChar(page, 0, GetRawANSI(), GetRawCount()+1, s.GetRawUnicode(), length); if (length == 0) ThrowLastError(); RETURN; } //----------------------------------------------------------------------------- // Set s to be a copy of this string's contents, but in the ANSI format. 
//----------------------------------------------------------------------------- void SString::ConvertToANSI(SString &s) const { CONTRACT_VOID { PRECONDITION(s.Check()); POSTCONDITION(s.IsRepresentation(REPRESENTATION_ANSI)); THROWS; GC_NOTRIGGER; } CONTRACT_END; switch (GetRepresentation()) { case REPRESENTATION_EMPTY: s.Clear(); RETURN; case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: s.Set(*this); RETURN; case REPRESENTATION_UTF8: // No direct conversion to ANSI ConvertToUnicode(); FALLTHROUGH; case REPRESENTATION_UNICODE: break; default: UNREACHABLE(); } // @todo: use WC_NO_BEST_FIT_CHARS COUNT_T length = WszWideCharToMultiByte(CP_ACP, 0, GetRawUnicode(), GetRawCount()+1, NULL, 0, NULL, NULL); s.Resize(length-1, REPRESENTATION_ANSI); // @todo: use WC_NO_BEST_FIT_CHARS length = WszWideCharToMultiByte(CP_ACP, 0, GetRawUnicode(), GetRawCount()+1, s.GetRawANSI(), length, NULL, NULL); if (length == 0) ThrowLastError(); RETURN; } //----------------------------------------------------------------------------- // Set s to be a copy of this string's contents, but in the utf8 format. //----------------------------------------------------------------------------- COUNT_T SString::ConvertToUTF8(SString &s) const { CONTRACT(COUNT_T) { PRECONDITION(s.Check()); POSTCONDITION(s.IsRepresentation(REPRESENTATION_UTF8)); THROWS; GC_NOTRIGGER; } CONTRACT_END; switch (GetRepresentation()) { case REPRESENTATION_EMPTY: s.Clear(); RETURN 1; case REPRESENTATION_ASCII: case REPRESENTATION_UTF8: s.Set(*this); RETURN s.GetRawCount()+1; case REPRESENTATION_ANSI: // No direct conversion from ANSI to UTF8 ConvertToUnicode(); FALLTHROUGH; case REPRESENTATION_UNICODE: break; default: UNREACHABLE(); } // <TODO> @todo: use WC_NO_BEST_FIT_CHARS </TODO> bool allAscii; DWORD length; HRESULT hr = FString::Unicode_Utf8_Length(GetRawUnicode(), & allAscii, & length); if (SUCCEEDED(hr)) { s.Resize(length, REPRESENTATION_UTF8); //FString::Unicode_Utf8 expects an array all the time //we optimize the empty string by replacing it with null for SString above in Resize if (length > 0) { hr = FString::Unicode_Utf8(GetRawUnicode(), allAscii, (LPSTR) s.GetRawUTF8(), length); } } IfFailThrow(hr); RETURN length + 1; } //----------------------------------------------------------------------------- // Replace a single character with another character. //----------------------------------------------------------------------------- void SString::Replace(const Iterator &i, WCHAR c) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i, 1)); POSTCONDITION(Match(i, c)); THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_ASCII) && ((c&~0x7f) == 0)) { *(BYTE*)i.m_ptr = (BYTE) c; } else { ConvertToUnicode(i); *(USHORT*)i.m_ptr = c; } RETURN; } //----------------------------------------------------------------------------- // Replace the substring specified by position, length with the given string s. //----------------------------------------------------------------------------- void SString::Replace(const Iterator &i, COUNT_T length, const SString &s) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i, length)); PRECONDITION(s.Check()); POSTCONDITION(Match(i, s)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; Representation representation = GetRepresentation(); if (representation == REPRESENTATION_EMPTY) { // This special case contains some optimizations (like literal sharing). 
Set(s); ConvertToIteratable(); i.Resync(this, m_buffer); } else { StackSString temp; const SString &source = GetCompatibleString(s, temp, i); COUNT_T deleteSize = length<<GetCharacterSizeShift(); COUNT_T insertSize = source.GetRawCount()<<source.GetCharacterSizeShift(); SBuffer::Replace(i, deleteSize, insertSize); SBuffer::Copy(i, source.m_buffer, insertSize); } RETURN; } //----------------------------------------------------------------------------- // Find s in this string starting at i. Return TRUE & update iterator if found. //----------------------------------------------------------------------------- BOOL SString::Find(CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); POSTCONDITION(RETVAL == Match(i, s)); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string from s StackSString temp; const SString &source = GetCompatibleString(s, temp, i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { COUNT_T count = source.GetRawCount(); const WCHAR *start = i.GetUnicode(); const WCHAR *end = GetUnicode() + GetRawCount() - count; while (start <= end) { if (wcsncmp(start, source.GetRawUnicode(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { COUNT_T count = source.GetRawCount(); const CHAR *start = i.GetASCII(); const CHAR *end = GetRawASCII() + GetRawCount() - count; while (start <= end) { if (strncmp(start, source.GetRawASCII(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_EMPTY: { if (source.GetRawCount() == 0) RETURN TRUE; } break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Find s in this string starting at i. Return TRUE & update iterator if found. //----------------------------------------------------------------------------- BOOL SString::Find(CIterator &i, WCHAR c) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); POSTCONDITION(RETVAL == Match(i, c)); THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string if (c & ~0x7f) ConvertToUnicode(i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { const WCHAR *start = i.GetUnicode(); const WCHAR *end = GetUnicode() + GetRawCount() - 1; while (start <= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { const CHAR *start = i.GetASCII(); const CHAR *end = GetRawASCII() + GetRawCount() - 1; while (start <= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_EMPTY: break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Find s in this string, working backwards starting at i. // Return TRUE and update iterator if found.
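//
// Illustrative usage sketch (not compiled as part of this file):
//
//   SString s(W("one.two.three"));
//   SString dot(W("."));
//   SString::CIterator i = s.End();
//   if (s.FindBack(i, dot))
//   {
//       // i now addresses the last '.'
//   }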
//----------------------------------------------------------------------------- BOOL SString::FindBack(CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); POSTCONDITION(RETVAL == Match(i, s)); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string from s StackSString temp; const SString &source = GetCompatibleString(s, temp, i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { COUNT_T count = source.GetRawCount(); const WCHAR *start = GetRawUnicode() + GetRawCount() - count; if (start > i.GetUnicode()) start = i.GetUnicode(); const WCHAR *end = GetRawUnicode(); while (start >= end) { if (wcsncmp(start, source.GetRawUnicode(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { COUNT_T count = source.GetRawCount(); const CHAR *start = GetRawASCII() + GetRawCount() - count; if (start > i.GetASCII()) start = i.GetASCII(); const CHAR *end = GetRawASCII(); while (start >= end) { if (strncmp(start, source.GetRawASCII(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_EMPTY: { if (source.GetRawCount() == 0) RETURN TRUE; } break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Find s in this string, working backwards starting at i. // Return TRUE and update iterator if found. //----------------------------------------------------------------------------- BOOL SString::FindBack(CIterator &i, WCHAR c) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); POSTCONDITION(RETVAL == Match(i, c)); THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string from s if (c & ~0x7f) ConvertToUnicode(i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { const WCHAR *start = GetRawUnicode() + GetRawCount() - 1; if (start > i.GetUnicode()) start = i.GetUnicode(); const WCHAR *end = GetRawUnicode(); while (start >= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { const CHAR *start = GetRawASCII() + GetRawCount() - 1; if (start > i.GetASCII()) start = i.GetASCII(); const CHAR *end = GetRawASCII(); while (start >= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_EMPTY: break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Returns TRUE if this string begins with the contents of s //----------------------------------------------------------------------------- BOOL SString::BeginsWith(const SString &s) const { WRAPPER_NO_CONTRACT; return Match(Begin(), s); } //----------------------------------------------------------------------------- // Returns TRUE if this string begins with the contents of s //----------------------------------------------------------------------------- BOOL SString::BeginsWithCaseInsensitive(const SString &s) const { WRAPPER_NO_CONTRACT; return MatchCaseInsensitive(Begin(), s); } //----------------------------------------------------------------------------- // Returns TRUE if this string ends with the contents of s //----------------------------------------------------------------------------- BOOL
SString::EndsWith(const SString &s) const { WRAPPER_NO_CONTRACT; // Need this check due to iterator arithmetic below. if (GetCount() < s.GetCount()) { return FALSE; } return Match(End() - s.GetCount(), s); } //----------------------------------------------------------------------------- // Returns TRUE if this string ends with the contents of s //----------------------------------------------------------------------------- BOOL SString::EndsWithCaseInsensitive(const SString &s) const { WRAPPER_NO_CONTRACT; // Need this check due to iterator arithmetic below. if (GetCount() < s.GetCount()) { return FALSE; } return MatchCaseInsensitive(End() - s.GetCount(), s); } //----------------------------------------------------------------------------- // Compare this string's contents to s's contents. // The comparison does not take into account localization issues like case folding. // Return 0 if equal, <0 if this < s, >0 if this > s. (same as strcmp). //----------------------------------------------------------------------------- int SString::Compare(const SString &s) const { CONTRACT(int) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T smaller; int equals = 0; int result = 0; if (GetRawCount() < source.GetRawCount()) { smaller = GetRawCount(); equals = -1; } else if (GetRawCount() > source.GetRawCount()) { smaller = source.GetRawCount(); equals = 1; } else { smaller = GetRawCount(); equals = 0; } switch (GetRepresentation()) { case REPRESENTATION_UNICODE: result = wcsncmp(GetRawUnicode(), source.GetRawUnicode(), smaller); break; case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: result = strncmp(GetRawASCII(), source.GetRawASCII(), smaller); break; case REPRESENTATION_EMPTY: result = 0; break; default: case REPRESENTATION_UTF8: UNREACHABLE(); } if (result == 0) RETURN equals; else RETURN result; } //----------------------------------------------------------------------------- // Compare this string's contents to s's contents. // Return 0 if equal, <0 if this < s, >0 if this > s. (same as strcmp). //----------------------------------------------------------------------------- int SString::CompareCaseInsensitive(const SString &s) const { CONTRACT(int) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T smaller; int equals = 0; int result = 0; if (GetRawCount() < source.GetRawCount()) { smaller = GetRawCount(); equals = -1; } else if (GetRawCount() > source.GetRawCount()) { smaller = source.GetRawCount(); equals = 1; } else { smaller = GetRawCount(); equals = 0; } switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_ANSI: result = CaseCompareHelper(GetRawUnicode(), source.GetRawUnicode(), smaller, FALSE, TRUE); break; case REPRESENTATION_ASCII: result = CaseCompareHelperA(GetRawASCII(), source.GetRawASCII(), smaller, FALSE, TRUE); break; case REPRESENTATION_EMPTY: result = 0; break; default: case REPRESENTATION_UTF8: UNREACHABLE(); } if (result == 0) RETURN equals; else RETURN result; } //----------------------------------------------------------------------------- // Compare this string's contents to s's contents. // The comparison does not take into account localization issues like case folding. // Return 1 if equal, 0 if not.
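//
// Illustrative usage sketch (not compiled as part of this file):
//
//   SString a(W("abc"));
//   SString b(W("ABC"));
//   a.Equals(b);                  // FALSE - ordinal comparison
//   a.EqualsCaseInsensitive(b);   // TRUE - invariant-culture upcasing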
//----------------------------------------------------------------------------- BOOL SString::Equals(const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); FAULTS_UNLESS_BOTH_NORMALIZED(s, ThrowOutOfMemory()); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T count = GetRawCount(); if (count != source.GetRawCount()) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: RETURN (wcsncmp(GetRawUnicode(), source.GetRawUnicode(), count) == 0); case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: RETURN (strncmp(GetRawASCII(), source.GetRawASCII(), count) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare this string's contents case insensitively to s's contents. // Return 1 if equal, 0 if not. //----------------------------------------------------------------------------- BOOL SString::EqualsCaseInsensitive(const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); FAULTS_UNLESS_BOTH_NORMALIZED(s, ThrowOutOfMemory()); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T count = GetRawCount(); if (count != source.GetRawCount()) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_ANSI: RETURN (CaseCompareHelper(GetRawUnicode(), source.GetRawUnicode(), count, FALSE, TRUE) == 0); case REPRESENTATION_ASCII: RETURN (CaseCompareHelperA(GetRawASCII(), source.GetRawASCII(), count, FALSE, TRUE) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare s's contents to the substring starting at position // The comparison does not take into account localization issues like case folding. 
// Return TRUE if equal, FALSE if not //----------------------------------------------------------------------------- BOOL SString::Match(const CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp, i); COUNT_T remaining = End() - i; COUNT_T count = source.GetRawCount(); if (remaining < count) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: RETURN (wcsncmp(i.GetUnicode(), source.GetRawUnicode(), count) == 0); case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: RETURN (strncmp(i.GetASCII(), source.GetRawASCII(), count) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare s's contents case insensitively to the substring starting at position // Return TRUE if equal, FALSE if not //----------------------------------------------------------------------------- BOOL SString::MatchCaseInsensitive(const CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp, i); COUNT_T remaining = End() - i; COUNT_T count = source.GetRawCount(); if (remaining < count) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_ANSI: RETURN (CaseCompareHelper(i.GetUnicode(), source.GetRawUnicode(), count, FALSE, TRUE) == 0); case REPRESENTATION_ASCII: RETURN (CaseCompareHelperA(i.GetASCII(), source.GetRawASCII(), count, FALSE, TRUE) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare c case insensitively to the character at position // Return TRUE if equal, FALSE if not //----------------------------------------------------------------------------- BOOL SString::MatchCaseInsensitive(const CIterator &i, WCHAR c) const { SS_CONTRACT(BOOL) { GC_NOTRIGGER; INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); NOTHROW; } SS_CONTRACT_END; // End() will not throw here CONTRACT_VIOLATION(ThrowsViolation); if (i >= End()) SS_RETURN FALSE; WCHAR test = i[0]; SS_RETURN (test == c || ((CAN_SIMPLE_UPCASE(test) ? SIMPLE_UPCASE(test) : MapChar(test, LCMAP_UPPERCASE)) == (CAN_SIMPLE_UPCASE(c) ? SIMPLE_UPCASE(c) : MapChar(c, LCMAP_UPPERCASE)))); } //----------------------------------------------------------------------------- // Convert string to unicode lowercase using the invariant culture // Note: Please don't use it in PATH as multiple characters can map to the same // lower case symbol //----------------------------------------------------------------------------- void SString::LowerCase() { SS_CONTRACT_VOID { GC_NOTRIGGER; PRECONDITION(CheckPointer(this)); SS_POSTCONDITION(CheckPointer(RETVAL)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; SUPPORTS_DAC; } SS_CONTRACT_END; ConvertToUnicode(); for (WCHAR *pwch = GetRawUnicode(); pwch < GetRawUnicode() + GetRawCount(); ++pwch) { *pwch = (CAN_SIMPLE_DOWNCASE(*pwch) ?
SIMPLE_DOWNCASE(*pwch) : MapChar(*pwch, LCMAP_LOWERCASE)); } } //----------------------------------------------------------------------------- // Convert null-terminated string to lowercase using the invariant culture //----------------------------------------------------------------------------- //static void SString::LowerCase(__inout_z LPWSTR wszString) { SS_CONTRACT_VOID { GC_NOTRIGGER; NOTHROW; SUPPORTS_DAC; } SS_CONTRACT_END; if (wszString == NULL) { return; } for (WCHAR * pwch = wszString; *pwch != '\0'; ++pwch) { *pwch = (CAN_SIMPLE_DOWNCASE(*pwch) ? SIMPLE_DOWNCASE(*pwch) : MapChar(*pwch, LCMAP_LOWERCASE)); } } //----------------------------------------------------------------------------- // Convert string to unicode uppercase using the invariant culture // Note: Please don't use it in PATH as multiple characters can map to the same // upper case symbol //----------------------------------------------------------------------------- void SString::UpperCase() { SS_CONTRACT_VOID { GC_NOTRIGGER; PRECONDITION(CheckPointer(this)); SS_POSTCONDITION(CheckPointer(RETVAL)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; SUPPORTS_DAC; } SS_CONTRACT_END; ConvertToUnicode(); for (WCHAR *pwch = GetRawUnicode(); pwch < GetRawUnicode() + GetRawCount(); ++pwch) { *pwch = (CAN_SIMPLE_UPCASE(*pwch) ? SIMPLE_UPCASE(*pwch) : MapChar(*pwch, LCMAP_UPPERCASE)); } } //----------------------------------------------------------------------------- // Get a const pointer to the internal buffer as an ANSI string. //----------------------------------------------------------------------------- const CHAR *SString::GetANSI(AbstractScratchBuffer &scratch) const { SS_CONTRACT(const CHAR *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (IsRepresentation(REPRESENTATION_ANSI)) SS_RETURN GetRawANSI(); ConvertToANSI((SString&)scratch); SS_RETURN ((SString&)scratch).GetRawANSI(); } //----------------------------------------------------------------------------- // Get a const pointer to the internal buffer as a UTF8 string. //----------------------------------------------------------------------------- const UTF8 *SString::GetUTF8(AbstractScratchBuffer &scratch) const { CONTRACT(const UTF8 *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_UTF8)) RETURN GetRawUTF8(); ConvertToUTF8((SString&)scratch); RETURN ((SString&)scratch).GetRawUTF8(); } const UTF8 *SString::GetUTF8(AbstractScratchBuffer &scratch, COUNT_T *pcbUtf8) const { CONTRACT(const UTF8 *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_UTF8)) { *pcbUtf8 = GetRawCount() + 1; RETURN GetRawUTF8(); } *pcbUtf8 = ConvertToUTF8((SString&)scratch); RETURN ((SString&)scratch).GetRawUTF8(); } //----------------------------------------------------------------------------- // Get a const pointer to the internal buffer which must already be a UTF8 string. // This avoids the need to create a scratch buffer we know will never be used. //----------------------------------------------------------------------------- const UTF8 *SString::GetUTF8NoConvert() const { CONTRACT(const UTF8 *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_UTF8)) RETURN GetRawUTF8(); ThrowHR(E_INVALIDARG); } //----------------------------------------------------------------------------- // Safe version of sprintf. // Prints formatted ansi text w/ var args to this buffer.
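//
// Illustrative usage sketch (not compiled as part of this file):
//
//   StackSString msg;
//   msg.Printf("%d of %d", 3, 10);   // grows the buffer as needed
//
// Prefer "%s" with a matching encoding over "%S"; the debug-only
// CheckForFormatStringGlobalizationIssues helper below flags "%S" misuse.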
//----------------------------------------------------------------------------- void SString::Printf(const CHAR *format, ...) { WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); VPrintf(format, args); va_end(args); } #ifdef _DEBUG // // Check the Printf use for potential globalization bugs. %S formatting // specifier does Unicode->Ansi or Ansi->Unicode conversion using current // C-locale. This almost always means a globalization bug in the CLR codebase. // // Ideally, we would eliminate %S from all format strings. Unfortunately, // %S is so widespread in non-shipping code that such cleanup is not feasible. // static void CheckForFormatStringGlobalizationIssues(const SString &format, const SString &result) { CONTRACTL { THROWS; GC_NOTRIGGER; DEBUG_ONLY; } CONTRACTL_END; BOOL fDangerousFormat = FALSE; // Check whether the format string contains the %S formatting specifier SString::CIterator itrFormat = format.Begin(); while (*itrFormat) { if (*itrFormat++ == '%') { // <TODO>Handle the complex format strings like %blahS</TODO> if (*itrFormat++ == 'S') { fDangerousFormat = TRUE; break; } } } if (fDangerousFormat) { BOOL fNonAsciiUsed = FALSE; // Now check whether there are any non-ASCII characters in the output. // Check whether the result contains non-Ascii characters SString::CIterator itrResult = result.Begin(); while (*itrResult) { if (*itrResult++ > 127) { fNonAsciiUsed = TRUE; break; } } CONSISTENCY_CHECK_MSGF(!fNonAsciiUsed, ("Non-ASCII string was produced by %%S format specifier. This is likely a globalization bug. " "To fix this, change the format string to %%s and do the correct encoding at the Printf callsite")); } } #endif #ifndef EBADF #define EBADF 9 #endif #ifndef ENOMEM #define ENOMEM 12 #endif #ifndef ERANGE #define ERANGE 34 #endif #if defined(_MSC_VER) #undef va_copy #define va_copy(dest,src) (dest = src) #endif void SString::VPrintf(const CHAR *format, va_list args) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list ap; // sprintf gives us no means to know how many characters are written // other than guessing and trying if (GetRawCount() > 0) { // First, try to use the existing buffer va_copy(ap, args); int result = _vsnprintf_s(GetRawANSI(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >=0) { // Succeeded in writing. Now resize - Resize(result, REPRESENTATION_ANSI, PRESERVE); SString sss(Ansi, format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } } // Make a guess how long the result will be (note this will be doubled) COUNT_T guess = (COUNT_T) strlen(format)+1; if (guess < GetRawCount()) guess = GetRawCount(); if (guess < MINIMUM_GUESS) guess = MINIMUM_GUESS; while (TRUE) { // Double the previous guess - eventually we will get enough space guess *= 2; Resize(guess, REPRESENTATION_ANSI); // Clear errno to avoid false alarms errno = 0; va_copy(ap, args); int result = _vsnprintf_s(GetRawANSI(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { // Succeeded in writing. Shrink the buffer to fit exactly. Resize(result, REPRESENTATION_ANSI, PRESERVE); SString sss(Ansi, format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } if (errno==ENOMEM) { ThrowOutOfMemory(); } else if (errno!=0 && errno!=EBADF && errno!=ERANGE) { CONSISTENCY_CHECK_MSG(FALSE, "_vsnprintf_s failed. Potential globalization bug."); ThrowHR(HRESULT_FROM_WIN32(ERROR_NO_UNICODE_TRANSLATION)); } } RETURN; } void SString::Printf(const WCHAR *format, ...)
{ WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); VPrintf(format, args); va_end(args); } void SString::PPrintf(const WCHAR *format, ...) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list argItr; va_start(argItr, format); PVPrintf(format, argItr); va_end(argItr); RETURN; } void SString::VPrintf(const WCHAR *format, va_list args) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list ap; // sprintf gives us no means to know how many characters are written // other than guessing and trying if (GetRawCount() > 0) { // First, try to use the existing buffer va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { // succeeded Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } } // Make a guess how long the result will be (note this will be doubled) COUNT_T guess = (COUNT_T) wcslen(format)+1; if (guess < GetRawCount()) guess = GetRawCount(); if (guess < MINIMUM_GUESS) guess = MINIMUM_GUESS; while (TRUE) { // Double the previous guess - eventually we will get enough space guess *= 2; Resize(guess, REPRESENTATION_UNICODE); // Clear errno to avoid false alarms errno = 0; va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } if (errno==ENOMEM) { ThrowOutOfMemory(); } else if (errno!=0 && errno!=EBADF && errno!=ERANGE) { CONSISTENCY_CHECK_MSG(FALSE, "_vsnwprintf_s failed. Potential globalization bug."); ThrowHR(HRESULT_FROM_WIN32(ERROR_NO_UNICODE_TRANSLATION)); } } RETURN; } void SString::PVPrintf(const WCHAR *format, va_list args) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list ap; // sprintf gives us no means to know how many characters are written // other than guessing and trying if (GetRawCount() > 0) { // First, try to use the existing buffer va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { // succeeded Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } } // Make a guess how long the result will be (note this will be doubled) COUNT_T guess = (COUNT_T) wcslen(format)+1; if (guess < GetRawCount()) guess = GetRawCount(); if (guess < MINIMUM_GUESS) guess = MINIMUM_GUESS; while (TRUE) { // Double the previous guess - eventually we will get enough space guess *= 2; Resize(guess, REPRESENTATION_UNICODE, DONT_PRESERVE); // Clear errno to avoid false alarms errno = 0; va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } if (errno==ENOMEM) { ThrowOutOfMemory(); } else if (errno!=0 && errno!=EBADF && errno!=ERANGE) { CONSISTENCY_CHECK_MSG(FALSE, "_vsnwprintf_s failed. Potential globalization bug."); ThrowHR(HRESULT_FROM_WIN32(ERROR_NO_UNICODE_TRANSLATION)); } } RETURN; } void SString::AppendPrintf(const CHAR *format, ...) 
{ WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); AppendVPrintf(format, args); va_end(args); } void SString::AppendVPrintf(const CHAR *format, va_list args) { WRAPPER_NO_CONTRACT; StackSString s; s.VPrintf(format, args); Append(s); } void SString::AppendPrintf(const WCHAR *format, ...) { WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); AppendVPrintf(format, args); va_end(args); } void SString::AppendVPrintf(const WCHAR *format, va_list args) { WRAPPER_NO_CONTRACT; StackSString s; s.VPrintf(format, args); Append(s); } //---------------------------------------------------------------------------- // LoadResource - moved to sstring_com.cpp //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- // Format the message and put the contents in this string //---------------------------------------------------------------------------- BOOL SString::FormatMessage(DWORD dwFlags, LPCVOID lpSource, DWORD dwMessageId, DWORD dwLanguageId, const SString &arg1, const SString &arg2, const SString &arg3, const SString &arg4, const SString &arg5, const SString &arg6, const SString &arg7, const SString &arg8, const SString &arg9, const SString &arg10) { CONTRACT(BOOL) { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; } CONTRACT_END; const WCHAR *args[] = {arg1.GetUnicode(), arg2.GetUnicode(), arg3.GetUnicode(), arg4.GetUnicode(), arg5.GetUnicode(), arg6.GetUnicode(), arg7.GetUnicode(), arg8.GetUnicode(), arg9.GetUnicode(), arg10.GetUnicode()}; if (GetRawCount() > 0) { // First, try to use our existing buffer to hold the result. Resize(GetRawCount(), REPRESENTATION_UNICODE); DWORD result = ::WszFormatMessage(dwFlags | FORMAT_MESSAGE_ARGUMENT_ARRAY, lpSource, dwMessageId, dwLanguageId, GetRawUnicode(), GetRawCount()+1, (va_list*)args); // Although we cannot directly detect truncation, we can tell if we // used up all the space (in which case we will assume truncation.) if (result != 0 && result < GetRawCount()) { if (GetRawUnicode()[result-1] == W(' ')) { GetRawUnicode()[result-1] = W('\0'); result -= 1; } Resize(result, REPRESENTATION_UNICODE, PRESERVE); RETURN TRUE; } } // We don't have enough space in our buffer, do dynamic allocation. 
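// FORMAT_MESSAGE_ALLOCATE_BUFFER makes FormatMessage LocalAlloc the result
// buffer; the LocalAllocHolder below then frees it automatically.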
LocalAllocHolder<WCHAR> string; DWORD result = ::WszFormatMessage(dwFlags | FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_ARGUMENT_ARRAY, lpSource, dwMessageId, dwLanguageId, (LPWSTR)(LPWSTR*)&string, 0, (va_list*)args); if (result == 0) RETURN FALSE; else { if (string[result-1] == W(' ')) string[result-1] = W('\0'); Set(string); RETURN TRUE; } } #if 1 //---------------------------------------------------------------------------- // Helper //---------------------------------------------------------------------------- // @todo -this should be removed and placed outside of SString void SString::MakeFullNamespacePath(const SString &nameSpace, const SString &name) { CONTRACT_VOID { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (nameSpace.GetRepresentation() == REPRESENTATION_UTF8 && name.GetRepresentation() == REPRESENTATION_UTF8) { const UTF8 *ns = nameSpace.GetRawUTF8(); const UTF8 *n = name.GetRawUTF8(); COUNT_T count = ns::GetFullLength(ns, n)-1; Resize(count, REPRESENTATION_UTF8); if (count > 0) ns::MakePath(GetRawUTF8(), count+1, ns, n); } else { const WCHAR *ns = nameSpace; const WCHAR *n = name; COUNT_T count = ns::GetFullLength(ns, n)-1; Resize(count, REPRESENTATION_UNICODE); if (count > 0) ns::MakePath(GetRawUnicode(), count+1, ns, n); } RETURN; } #endif //---------------------------------------------------------------------------- // Private helper. // Check to see if the string fits the suggested representation //---------------------------------------------------------------------------- BOOL SString::IsRepresentation(Representation representation) const { CONTRACT(BOOL) { PRECONDITION(CheckRepresentation(representation)); NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACT_END; Representation currentRepresentation = GetRepresentation(); // If representations are the same, cool. if (currentRepresentation == representation) RETURN TRUE; // If we have an empty representation, we match everything if (currentRepresentation == REPRESENTATION_EMPTY) RETURN TRUE; // If we're a 1 byte charset, there are some more chances to match if (currentRepresentation != REPRESENTATION_UNICODE && representation != REPRESENTATION_UNICODE) { // If we're ASCII, we can be any 1 byte rep if (currentRepresentation == REPRESENTATION_ASCII) RETURN TRUE; // We really want to be ASCII - scan to see if we qualify if (ScanASCII()) RETURN TRUE; } // Sorry, must convert. RETURN FALSE; } //---------------------------------------------------------------------------- // Private helper. // Get the contents of the given string in a form which is compatible with our // string (and is in a fixed character set.) Updates the given iterator // if necessary to keep it in sync. //---------------------------------------------------------------------------- const SString &SString::GetCompatibleString(const SString &s, SString &scratch, const CIterator &i) const { CONTRACTL { PRECONDITION(s.Check()); PRECONDITION(scratch.Check()); PRECONDITION(scratch.CheckEmpty()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; // Since we have an iterator, we should be fixed size already CONSISTENCY_CHECK(IsFixedSize()); switch (GetRepresentation()) { case REPRESENTATION_EMPTY: return s; case REPRESENTATION_ASCII: if (s.IsRepresentation(REPRESENTATION_ASCII)) return s; // We can't in general convert to ASCII, so try unicode. 
ConvertToUnicode(i); FALLTHROUGH; case REPRESENTATION_UNICODE: if (s.IsRepresentation(REPRESENTATION_UNICODE)) return s; // @todo: we could convert s to unicode - is that a good policy???? s.ConvertToUnicode(scratch); return scratch; case REPRESENTATION_UTF8: case REPRESENTATION_ANSI: // These should all be impossible since we have a CIterator on us. default: UNREACHABLE_MSG("Unexpected string representation"); } return s; } //---------------------------------------------------------------------------- // Private helper. // Get the contents of the given string in a form which is compatible with our // string (and is in a fixed character set.) // May convert our string to unicode. //---------------------------------------------------------------------------- const SString &SString::GetCompatibleString(const SString &s, SString &scratch) const { CONTRACTL { PRECONDITION(s.Check()); PRECONDITION(scratch.Check()); PRECONDITION(scratch.CheckEmpty()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACTL_END; // First, make sure we have a fixed size. ConvertToFixed(); switch (GetRepresentation()) { case REPRESENTATION_EMPTY: return s; case REPRESENTATION_ANSI: if (s.IsRepresentation(REPRESENTATION_ANSI)) return s; s.ConvertToANSI(scratch); return scratch; case REPRESENTATION_ASCII: if (s.IsRepresentation(REPRESENTATION_ASCII)) return s; // We can't in general convert to ASCII, so try unicode. ConvertToUnicode(); FALLTHROUGH; case REPRESENTATION_UNICODE: if (s.IsRepresentation(REPRESENTATION_UNICODE)) return s; // @todo: we could convert s to unicode in place - is that a good policy???? s.ConvertToUnicode(scratch); return scratch; case REPRESENTATION_UTF8: default: UNREACHABLE(); } return s; } //---------------------------------------------------------------------------- // Private helper. // If we have a 1 byte representation, scan the buffer to see if we can gain // some conversion flexibility by labelling it ASCII //---------------------------------------------------------------------------- BOOL SString::ScanASCII() const { CONTRACT(BOOL) { POSTCONDITION(IsRepresentation(REPRESENTATION_ASCII) || IsASCIIScanned()); NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACT_END; if (!IsASCIIScanned()) { const CHAR *c = GetRawANSI(); const CHAR *cEnd = c + GetRawCount(); while (c < cEnd) { if (*c & 0x80) break; c++; } if (c == cEnd) { const_cast<SString *>(this)->SetRepresentation(REPRESENTATION_ASCII); RETURN TRUE; } else const_cast<SString *>(this)->SetASCIIScanned(); } RETURN FALSE; } //---------------------------------------------------------------------------- // Private helper. // Resize updates the geometry of the string and ensures that // the space can be written to. // count - number of characters (not including null) to hold // preserve - if we realloc, do we copy data from old to new?
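//
// Illustrative sketch of the two modes (not compiled as part of this file):
//
//   s.Resize(10, REPRESENTATION_UNICODE);            // contents undefined
//   s.Resize(10, REPRESENTATION_UNICODE, PRESERVE);  // existing chars kept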
//---------------------------------------------------------------------------- void SString::Resize(COUNT_T count, SString::Representation representation, Preserve preserve) { CONTRACT_VOID { PRECONDITION(CountToSize(count) >= count); POSTCONDITION(IsRepresentation(representation)); POSTCONDITION(GetRawCount() == count); if (count == 0) NOTHROW; else THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; // If we are resizing to zero, Clear is more efficient if (count == 0) { Clear(); } else { SetRepresentation(representation); COUNT_T size = CountToSize(count); // detect overflow if (size < count) ThrowOutOfMemory(); ClearNormalized(); SBuffer::Resize(size, preserve); if (IsImmutable()) EnsureMutable(); NullTerminate(); } RETURN; } //----------------------------------------------------------------------------- // This is essentially a specialized version of the above for size 0 //----------------------------------------------------------------------------- void SString::Clear() { CONTRACT_VOID { INSTANCE_CHECK; POSTCONDITION(IsEmpty()); NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; SetRepresentation(REPRESENTATION_EMPTY); if (IsImmutable()) { // Use shared empty string rather than allocating a new buffer SBuffer::SetImmutable(s_EmptyBuffer, sizeof(s_EmptyBuffer)); } else { // Leave allocated buffer for future growth SBuffer::TweakSize(sizeof(WCHAR)); GetRawUnicode()[0] = 0; } RETURN; } #ifdef DACCESS_COMPILE //--------------------------------------------------------------------------------------- // // Return a pointer to the raw buffer // // Returns: // A pointer to the raw string buffer. // void * SString::DacGetRawContent() const { if (IsEmpty()) { return NULL; } switch (GetRepresentation()) { case REPRESENTATION_EMPTY: return NULL; case REPRESENTATION_UNICODE: case REPRESENTATION_UTF8: case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: // Note: no need to call DacInstantiateString because we know the exact length already. return SBuffer::DacGetRawContent(); default: DacNotImpl(); return NULL; } } //--------------------------------------------------------------------------------------- // // Return a pointer to the raw buffer as a pointer to a unicode string. Does not // do conversion, and thus requires that the representation already be in unicode. // // Returns: // A pointer to the raw string buffer as a unicode string. // const WCHAR * SString::DacGetRawUnicode() const { if (IsEmpty() || (GetRepresentation() == REPRESENTATION_EMPTY)) { return W(""); } if (GetRepresentation() != REPRESENTATION_UNICODE) { DacError(E_UNEXPECTED); } HRESULT status = S_OK; WCHAR* wszBuf = NULL; EX_TRY { wszBuf = static_cast<WCHAR*>(SBuffer::DacGetRawContent()); } EX_CATCH_HRESULT(status); if (SUCCEEDED(status)) { return wszBuf; } else { return NULL; } } //--------------------------------------------------------------------------------------- // // Copy the string from the target into the provided buffer, converting to unicode if necessary // // Arguments: // cBufChars - size of pBuffer in count of unicode characters. // pBuffer - a buffer of cBufChars unicode chars. // pcNeedChars - space to store the number of unicode chars in the SString. // // Returns: // true if successful - and buffer is filled with the unicode representation of // the string. // false if unsuccessful. 
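//
// Typical two-call pattern (illustrative sketch, not compiled here):
//
//   COUNT_T cNeeded = 0;
//   str.DacGetUnicode(0, NULL, &cNeeded);           // query required size
//   NewArrayHolder<WCHAR> buf(new WCHAR[cNeeded]);
//   str.DacGetUnicode(cNeeded, buf, NULL);          // convert into the buffer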
// bool SString::DacGetUnicode(COUNT_T cBufChars, _Inout_updates_z_(cBufChars) WCHAR * pBuffer, COUNT_T * pcNeedChars) const { SUPPORTS_DAC; PVOID pContent = NULL; int iPage = CP_ACP; if (IsEmpty() || (GetRepresentation() == REPRESENTATION_EMPTY)) { if (pcNeedChars) { *pcNeedChars = 1; } if (pBuffer && cBufChars) { pBuffer[0] = 0; } return true; } HRESULT status = S_OK; EX_TRY { pContent = SBuffer::DacGetRawContent(); } EX_CATCH_HRESULT(status); if (SUCCEEDED(status) && pContent != NULL) { switch (GetRepresentation()) { case REPRESENTATION_UNICODE: if (pcNeedChars) { *pcNeedChars = GetCount() + 1; } if (pBuffer && cBufChars) { if (cBufChars > GetCount() + 1) { cBufChars = GetCount() + 1; } memcpy(pBuffer, pContent, cBufChars * sizeof(*pBuffer)); pBuffer[cBufChars - 1] = 0; } return true; case REPRESENTATION_UTF8: iPage = CP_UTF8; FALLTHROUGH; case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: // iPage defaults to CP_ACP. if (pcNeedChars) { *pcNeedChars = WszMultiByteToWideChar(iPage, 0, reinterpret_cast<PSTR>(pContent), -1, NULL, 0); } if (pBuffer && cBufChars) { if (!WszMultiByteToWideChar(iPage, 0, reinterpret_cast<PSTR>(pContent), -1, pBuffer, cBufChars)) { return false; } } return true; default: DacNotImpl(); return false; } } return false; } #endif //DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // --------------------------------------------------------------------------- // SString.cpp // // --------------------------------------------------------------------------- #include "stdafx.h" #include "sstring.h" #include "ex.h" #include "holder.h" #if defined(_MSC_VER) #pragma inline_depth (25) #endif //----------------------------------------------------------------------------- // Static variables //----------------------------------------------------------------------------- // Have one internal, well-known, literal for the empty string. const BYTE SString::s_EmptyBuffer[2] = { 0 }; // @todo: these need to be initialized by calling GetACP() UINT SString::s_ACP = 0; #ifndef DACCESS_COMPILE static BYTE s_EmptySpace[sizeof(SString)] = { 0 }; #endif // DACCESS_COMPILE SPTR_IMPL(SString,SString,s_Empty); void SString::Startup() { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; if (s_ACP == 0) { UINT ACP = GetACP(); #ifndef DACCESS_COMPILE s_Empty = PTR_SString(new (s_EmptySpace) SString()); s_Empty->SetNormalized(); #endif // DACCESS_COMPILE MemoryBarrier(); s_ACP = ACP; } } CHECK SString::CheckStartup() { WRAPPER_NO_CONTRACT; CHECK(s_Empty != NULL); CHECK_OK; } //----------------------------------------------------------------------------- // Case insensitive helpers. //----------------------------------------------------------------------------- static WCHAR MapChar(WCHAR wc, DWORD dwFlags) { WRAPPER_NO_CONTRACT; WCHAR wTmp; #ifndef TARGET_UNIX int iRet = ::LCMapStringEx(LOCALE_NAME_INVARIANT, dwFlags, &wc, 1, &wTmp, 1, NULL, NULL, 0); if (!iRet) { // This can fail in non-exceptional cases because of unknown unicode characters. wTmp = wc; } #else // !TARGET_UNIX // For PAL, no locale specific processing is done if (dwFlags == LCMAP_UPPERCASE) { wTmp = #ifdef SELF_NO_HOST toupper(wc); #else PAL_ToUpperInvariant(wc); #endif } else { _ASSERTE(dwFlags == LCMAP_LOWERCASE); wTmp = #ifdef SELF_NO_HOST tolower(wc); #else PAL_ToLowerInvariant(wc); #endif } #endif // !TARGET_UNIX return wTmp; } #define IS_UPPER_A_TO_Z(x) (((x) >= W('A')) && ((x) <= W('Z'))) #define IS_LOWER_A_TO_Z(x) (((x) >= W('a')) && ((x) <= W('z'))) #define CAN_SIMPLE_UPCASE(x) (((x)&~0x7f) == 0) #define CAN_SIMPLE_DOWNCASE(x) (((x)&~0x7f) == 0) #define SIMPLE_UPCASE(x) (IS_LOWER_A_TO_Z(x) ? ((x) - W('a') + W('A')) : (x)) #define SIMPLE_DOWNCASE(x) (IS_UPPER_A_TO_Z(x) ? ((x) - W('A') + W('a')) : (x)) /* static */ int SString::CaseCompareHelper(const WCHAR *buffer1, const WCHAR *buffer2, COUNT_T count, BOOL stopOnNull, BOOL stopOnCount) { LIMITED_METHOD_CONTRACT; _ASSERTE(stopOnNull || stopOnCount); const WCHAR *buffer1End = buffer1 + count; int diff = 0; while (!stopOnCount || (buffer1 < buffer1End)) { WCHAR ch1 = *buffer1++; WCHAR ch2 = *buffer2++; diff = ch1 - ch2; if ((ch1 == 0) || (ch2 == 0)) { if (diff != 0 || stopOnNull) { break; } } else { if (diff != 0) { diff = ((CAN_SIMPLE_UPCASE(ch1) ? SIMPLE_UPCASE(ch1) : MapChar(ch1, LCMAP_UPPERCASE)) - (CAN_SIMPLE_UPCASE(ch2) ? SIMPLE_UPCASE(ch2) : MapChar(ch2, LCMAP_UPPERCASE))); } if (diff != 0) { break; } } } return diff; } #define IS_LOWER_A_TO_Z_ANSI(x) (((x) >= 'a') && ((x) <= 'z')) #define CAN_SIMPLE_UPCASE_ANSI(x) (((x) >= 0x20) && ((x) <= 0x7f)) #define SIMPLE_UPCASE_ANSI(x) (IS_LOWER_A_TO_Z(x) ?
((x) - 'a' + 'A') : (x)) /* static */ int SString::CaseCompareHelperA(const CHAR *buffer1, const CHAR *buffer2, COUNT_T count, BOOL stopOnNull, BOOL stopOnCount) { LIMITED_METHOD_CONTRACT; _ASSERTE(stopOnNull || stopOnCount); const CHAR *buffer1End = buffer1 + count; int diff = 0; while (!stopOnCount || (buffer1 < buffer1End)) { CHAR ch1 = *buffer1; CHAR ch2 = *buffer2; diff = ch1 - ch2; if (diff != 0 || stopOnNull) { if (ch1 == 0 || ch2 == 0) { break; } diff = (SIMPLE_UPCASE_ANSI(ch1) - SIMPLE_UPCASE_ANSI(ch2)); if (diff != 0) { break; } } buffer1++; buffer2++; } return diff; } int CaseHashHelper(const WCHAR *buffer, COUNT_T count) { LIMITED_METHOD_CONTRACT; const WCHAR *bufferEnd = buffer + count; ULONG hash = 5381; while (buffer < bufferEnd) { WCHAR ch = *buffer++; ch = CAN_SIMPLE_UPCASE(ch) ? SIMPLE_UPCASE(ch) : MapChar(ch, LCMAP_UPPERCASE); hash = (((hash << 5) + hash) ^ ch); } return hash; } static int CaseHashHelperA(const CHAR *buffer, COUNT_T count) { LIMITED_METHOD_CONTRACT; const CHAR *bufferEnd = buffer + count; ULONG hash = 5381; while (buffer < bufferEnd) { CHAR ch = *buffer++; ch = SIMPLE_UPCASE_ANSI(ch); hash = (((hash << 5) + hash) ^ ch); } return hash; } //----------------------------------------------------------------------------- // Set this string to a copy of the unicode string //----------------------------------------------------------------------------- void SString::Set(const WCHAR *string) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) wcslen(string), REPRESENTATION_UNICODE); wcscpy_s(GetRawUnicode(), GetBufferSizeInCharIncludeNullChar(), string); } RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // unicode string. //----------------------------------------------------------------------------- void SString::Set(const WCHAR *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_UNICODE); wcsncpy_s(GetRawUnicode(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawUnicode()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to point to the first count characters of the given // preallocated unicode string (shallow copy).
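//
// The string does not copy or own the buffer: the caller must keep it alive
// and unchanged for the lifetime of this SString. Illustrative sketch (not
// compiled as part of this file):
//
//   WCHAR wsz[] = W("payload");
//   SString s;
//   s.SetPreallocated(wsz, 7);   // shares wsz; no allocation is made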
//----------------------------------------------------------------------------- void SString::SetPreallocated(const WCHAR *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); SS_POSTCONDITION(IsEmpty()); GC_NOTRIGGER; NOTHROW; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; SetImmutable(); SetImmutable((BYTE*) string, count*2); ClearAllocated(); SetRepresentation(REPRESENTATION_UNICODE); SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the given ansi string //----------------------------------------------------------------------------- void SString::SetASCII(const ASCII *string) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckASCIIString(string)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) strlen(string), REPRESENTATION_ASCII); strcpy_s(GetRawUTF8(), GetBufferSizeInCharIncludeNullChar(), string); } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // ascii string //----------------------------------------------------------------------------- void SString::SetASCII(const ASCII *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckASCIIString(string, count)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_ASCII); strncpy_s(GetRawASCII(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawASCII()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the given UTF8 string //----------------------------------------------------------------------------- void SString::SetUTF8(const UTF8 *string) { SS_CONTRACT_VOID { // !!! Check for illegal UTF8 encoding? INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) strlen(string), REPRESENTATION_UTF8); strcpy_s(GetRawUTF8(), GetBufferSizeInCharIncludeNullChar(), string); } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // UTF8 string. //----------------------------------------------------------------------------- void SString::SetUTF8(const UTF8 *string, COUNT_T count) { SS_CONTRACT_VOID { // !!! Check for illegal UTF8 encoding? 
INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_UTF8); strncpy_s(GetRawUTF8(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawUTF8()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the given ANSI string //----------------------------------------------------------------------------- void SString::SetANSI(const ANSI *string) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (string == NULL || *string == 0) Clear(); else { Resize((COUNT_T) strlen(string), REPRESENTATION_ANSI); strcpy_s(GetRawANSI(), GetBufferSizeInCharIncludeNullChar(), string); } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to a copy of the first count characters of the given // ANSI string. //----------------------------------------------------------------------------- void SString::SetANSI(const ANSI *string, COUNT_T count) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(string, NULL_OK)); PRECONDITION(CheckCount(count)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (count == 0) Clear(); else { Resize(count, REPRESENTATION_ANSI); strncpy_s(GetRawANSI(), GetBufferSizeInCharIncludeNullChar(), string, count); GetRawANSI()[count] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given unicode character //----------------------------------------------------------------------------- void SString::Set(WCHAR character) { SS_CONTRACT_VOID { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; if (character == 0) Clear(); else { Resize(1, REPRESENTATION_UNICODE); GetRawUnicode()[0] = character; GetRawUnicode()[1] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given UTF8 character //----------------------------------------------------------------------------- void SString::SetUTF8(CHAR character) { SS_CONTRACT_VOID { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (character == 0) Clear(); else { Resize(1, REPRESENTATION_UTF8); GetRawUTF8()[0] = character; GetRawUTF8()[1] = 0; } SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given ansi literal. // This will share the memory and not make a copy. //----------------------------------------------------------------------------- void SString::SetLiteral(const ASCII *literal) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(literal)); PRECONDITION(CheckASCIIString(literal)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; SString s(Literal, literal); Set(s); SS_RETURN; } //----------------------------------------------------------------------------- // Set this string to the given unicode literal. // This will share the memory and not make a copy. 
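//
// Illustrative usage sketch (not compiled as part of this file):
//
//   SString s;
//   s.SetLiteral(W("mscorlib"));   // s references the literal in place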
//----------------------------------------------------------------------------- void SString::SetLiteral(const WCHAR *literal) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(literal)); THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; SString s(Literal, literal); Set(s); SS_RETURN; } //----------------------------------------------------------------------------- // Hash the string contents //----------------------------------------------------------------------------- ULONG SString::Hash() const { SS_CONTRACT(ULONG) { INSTANCE_CHECK; THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } SS_CONTRACT_END; ConvertToUnicode(); SS_RETURN HashString(GetRawUnicode()); } //----------------------------------------------------------------------------- // Hash the string contents //----------------------------------------------------------------------------- ULONG SString::HashCaseInsensitive() const { SS_CONTRACT(ULONG) { INSTANCE_CHECK; THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } SS_CONTRACT_END; ConvertToIteratable(); ULONG result; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_EMPTY: result = CaseHashHelper(GetRawUnicode(), GetRawCount()); break; case REPRESENTATION_ASCII: result = CaseHashHelperA(GetRawASCII(), GetRawCount()); break; default: UNREACHABLE(); } SS_RETURN result; } //----------------------------------------------------------------------------- // Truncate this string to count characters. //----------------------------------------------------------------------------- void SString::Truncate(const Iterator &i) { SS_CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); SS_POSTCONDITION(GetRawCount() == i - Begin()); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } SS_CONTRACT_END; CONSISTENCY_CHECK(IsFixedSize()); COUNT_T size = i - Begin(); Resize(size, GetRepresentation(), PRESERVE); i.Resync(this, (BYTE *) (GetRawUnicode() + size)); SS_RETURN; } //----------------------------------------------------------------------------- // Convert the ASCII representation for this String to Unicode. We can do this // quickly and in-place (if this == &dest), which is why it is optimized. //----------------------------------------------------------------------------- void SString::ConvertASCIIToUnicode(SString &dest) const { CONTRACT_VOID { PRECONDITION(IsRepresentation(REPRESENTATION_ASCII)); POSTCONDITION(dest.IsRepresentation(REPRESENTATION_UNICODE)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; // Handle the empty case. if (IsEmpty()) { dest.Clear(); RETURN; } CONSISTENCY_CHECK(CheckPointer(GetRawASCII())); CONSISTENCY_CHECK(GetRawCount() > 0); // If dest is the same as this, then we need to preserve on resize. dest.Resize(GetRawCount(), REPRESENTATION_UNICODE, this == &dest ? PRESERVE : DONT_PRESERVE); // Make sure the buffer is big enough. CONSISTENCY_CHECK(dest.GetAllocation() > (GetRawCount() * sizeof(WCHAR))); // This is a poor man's widen. Since we know that the representation is ASCII, // we can just pad the string with a bunch of zero-value bytes. Of course, // we move from the end of the string to the start so that we can convert in // place (in the case that &dest == this). WCHAR *outBuf = dest.GetRawUnicode() + dest.GetRawCount(); ASCII *inBuf = GetRawASCII() + GetRawCount(); while (GetRawASCII() <= inBuf) { CONSISTENCY_CHECK(dest.GetRawUnicode() <= outBuf); // The casting zero-extends the value, thus giving us the zero-valued byte. 
*outBuf = (WCHAR) *inBuf; outBuf--; inBuf--; } RETURN; } //----------------------------------------------------------------------------- // Convert the internal representation for this String to Unicode. //----------------------------------------------------------------------------- void SString::ConvertToUnicode() const { CONTRACT_VOID { POSTCONDITION(IsRepresentation(REPRESENTATION_UNICODE)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; if (!IsRepresentation(REPRESENTATION_UNICODE)) { if (IsRepresentation(REPRESENTATION_ASCII)) { ConvertASCIIToUnicode(*(const_cast<SString *>(this))); } else { StackSString s; ConvertToUnicode(s); PREFIX_ASSUME(!s.IsImmutable()); (const_cast<SString*>(this))->Set(s); } } RETURN; } //----------------------------------------------------------------------------- // Convert the internal representation for this String to Unicode, while // preserving the iterator if the conversion is done. //----------------------------------------------------------------------------- void SString::ConvertToUnicode(const CIterator &i) const { CONTRACT_VOID { PRECONDITION(i.Check()); POSTCONDITION(IsRepresentation(REPRESENTATION_UNICODE)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; if (!IsRepresentation(REPRESENTATION_UNICODE)) { CONSISTENCY_CHECK(IsFixedSize()); COUNT_T index = 0; // Get the current index of the iterator if (i.m_ptr != NULL) { CONSISTENCY_CHECK(GetCharacterSizeShift() == 0); index = (COUNT_T) (i.m_ptr - m_buffer); } if (IsRepresentation(REPRESENTATION_ASCII)) { ConvertASCIIToUnicode(*(const_cast<SString *>(this))); } else { StackSString s; ConvertToUnicode(s); (const_cast<SString*>(this))->Set(s); } // Move the iterator to the new location. if (i.m_ptr != NULL) { i.Resync(this, (BYTE *) (GetRawUnicode() + index)); } } RETURN; } //----------------------------------------------------------------------------- // Set s to be a copy of this string's contents, but in the unicode format. //----------------------------------------------------------------------------- void SString::ConvertToUnicode(SString &s) const { CONTRACT_VOID { PRECONDITION(s.Check()); POSTCONDITION(s.IsRepresentation(REPRESENTATION_UNICODE)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; int page = 0; switch (GetRepresentation()) { case REPRESENTATION_EMPTY: s.Clear(); RETURN; case REPRESENTATION_UNICODE: s.Set(*this); RETURN; case REPRESENTATION_UTF8: page = CP_UTF8; break; case REPRESENTATION_ASCII: ConvertASCIIToUnicode(s); RETURN; case REPRESENTATION_ANSI: page = CP_ACP; break; default: UNREACHABLE(); } COUNT_T length = WszMultiByteToWideChar(page, 0, GetRawANSI(), GetRawCount()+1, 0, 0); if (length == 0) ThrowLastError(); s.Resize(length-1, REPRESENTATION_UNICODE); length = WszMultiByteToWideChar(page, 0, GetRawANSI(), GetRawCount()+1, s.GetRawUnicode(), length); if (length == 0) ThrowLastError(); RETURN; } //----------------------------------------------------------------------------- // Set s to be a copy of this string's contents, but in the ANSI format. 
//----------------------------------------------------------------------------- void SString::ConvertToANSI(SString &s) const { CONTRACT_VOID { PRECONDITION(s.Check()); POSTCONDITION(s.IsRepresentation(REPRESENTATION_ANSI)); THROWS; GC_NOTRIGGER; } CONTRACT_END; switch (GetRepresentation()) { case REPRESENTATION_EMPTY: s.Clear(); RETURN; case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: s.Set(*this); RETURN; case REPRESENTATION_UTF8: // No direct conversion to ANSI ConvertToUnicode(); FALLTHROUGH; case REPRESENTATION_UNICODE: break; default: UNREACHABLE(); } // @todo: use WC_NO_BEST_FIT_CHARS COUNT_T length = WszWideCharToMultiByte(CP_ACP, 0, GetRawUnicode(), GetRawCount()+1, NULL, 0, NULL, NULL); s.Resize(length-1, REPRESENTATION_ANSI); // @todo: use WC_NO_BEST_FIT_CHARS length = WszWideCharToMultiByte(CP_ACP, 0, GetRawUnicode(), GetRawCount()+1, s.GetRawANSI(), length, NULL, NULL); if (length == 0) ThrowLastError(); RETURN; } //----------------------------------------------------------------------------- // Set s to be a copy of this string's contents, but in the utf8 format. //----------------------------------------------------------------------------- COUNT_T SString::ConvertToUTF8(SString &s) const { CONTRACT(COUNT_T) { PRECONDITION(s.Check()); POSTCONDITION(s.IsRepresentation(REPRESENTATION_UTF8)); THROWS; GC_NOTRIGGER; } CONTRACT_END; switch (GetRepresentation()) { case REPRESENTATION_EMPTY: s.Clear(); RETURN 1; case REPRESENTATION_ASCII: case REPRESENTATION_UTF8: s.Set(*this); RETURN s.GetRawCount()+1; case REPRESENTATION_ANSI: // No direct conversion from ANSI to UTF8 ConvertToUnicode(); FALLTHROUGH; case REPRESENTATION_UNICODE: break; default: UNREACHABLE(); } // <TODO> @todo: use WC_NO_BEST_FIT_CHARS </TODO> bool allAscii; DWORD length; HRESULT hr = FString::Unicode_Utf8_Length(GetRawUnicode(), & allAscii, & length); if (SUCCEEDED(hr)) { s.Resize(length, REPRESENTATION_UTF8); //FString::Unicode_Utf8 expects an array all the time //we optimize the empty string by replacing it with null for SString above in Resize if (length > 0) { hr = FString::Unicode_Utf8(GetRawUnicode(), allAscii, (LPSTR) s.GetRawUTF8(), length); } } IfFailThrow(hr); RETURN length + 1; } //----------------------------------------------------------------------------- // Replace a single character with another character. //----------------------------------------------------------------------------- void SString::Replace(const Iterator &i, WCHAR c) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i, 1)); POSTCONDITION(Match(i, c)); THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_ASCII) && ((c&~0x7f) == 0)) { *(BYTE*)i.m_ptr = (BYTE) c; } else { ConvertToUnicode(i); *(USHORT*)i.m_ptr = c; } RETURN; } //----------------------------------------------------------------------------- // Replace the substring specified by position, length with the given string s. //----------------------------------------------------------------------------- void SString::Replace(const Iterator &i, COUNT_T length, const SString &s) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i, length)); PRECONDITION(s.Check()); POSTCONDITION(Match(i, s)); THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; Representation representation = GetRepresentation(); if (representation == REPRESENTATION_EMPTY) { // This special case contains some optimizations (like literal sharing). 
Set(s); ConvertToIteratable(); i.Resync(this, m_buffer); } else { StackSString temp; const SString &source = GetCompatibleString(s, temp, i); COUNT_T deleteSize = length<<GetCharacterSizeShift(); COUNT_T insertSize = source.GetRawCount()<<source.GetCharacterSizeShift(); SBuffer::Replace(i, deleteSize, insertSize); SBuffer::Copy(i, source.m_buffer, insertSize); } RETURN; } //----------------------------------------------------------------------------- // Find s in this string starting at i. Return TRUE & update iterator if found. //----------------------------------------------------------------------------- BOOL SString::Find(CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); POSTCONDITION(RETVAL == Match(i, s)); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string from s StackSString temp; const SString &source = GetCompatibleString(s, temp, i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { COUNT_T count = source.GetRawCount(); const WCHAR *start = i.GetUnicode(); const WCHAR *end = GetUnicode() + GetRawCount() - count; while (start <= end) { if (wcsncmp(start, source.GetRawUnicode(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { COUNT_T count = source.GetRawCount(); const CHAR *start = i.GetASCII(); const CHAR *end = GetRawASCII() + GetRawCount() - count; while (start <= end) { if (strncmp(start, source.GetRawASCII(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_EMPTY: { if (source.GetRawCount() == 0) RETURN TRUE; } break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Find s in this string starting at i. Return TRUE & update iterator if found. //----------------------------------------------------------------------------- BOOL SString::Find(CIterator &i, WCHAR c) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); POSTCONDITION(RETVAL == Match(i, c)); THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string if (c & ~0x7f) ConvertToUnicode(i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { const WCHAR *start = i.GetUnicode(); const WCHAR *end = GetUnicode() + GetRawCount() - 1; while (start <= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { const CHAR *start = i.GetASCII(); const CHAR *end = GetRawASCII() + GetRawCount() - 1; while (start <= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start++; } } break; case REPRESENTATION_EMPTY: break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Find s in this string, working backwards staring at i. // Return TRUE and update iterator if found. 
//----------------------------------------------------------------------------- BOOL SString::FindBack(CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); POSTCONDITION(RETVAL == Match(i, s)); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string from s StackSString temp; const SString &source = GetCompatibleString(s, temp, i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { COUNT_T count = source.GetRawCount(); const WCHAR *start = GetRawUnicode() + GetRawCount() - count; if (start > i.GetUnicode()) start = i.GetUnicode(); const WCHAR *end = GetRawUnicode(); while (start >= end) { if (wcsncmp(start, source.GetRawUnicode(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { COUNT_T count = source.GetRawCount(); const CHAR *start = GetRawASCII() + GetRawCount() - count; if (start > i.GetASCII()) start = i.GetASCII(); const CHAR *end = GetRawASCII(); while (start >= end) { if (strncmp(start, source.GetRawASCII(), count) == 0) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_EMPTY: { if (source.GetRawCount() == 0) RETURN TRUE; } break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Find s in this string, working backwards staring at i. // Return TRUE and update iterator if found. //----------------------------------------------------------------------------- BOOL SString::FindBack(CIterator &i, WCHAR c) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); POSTCONDITION(RETVAL == Match(i, c)); THROWS_UNLESS_NORMALIZED; GC_NOTRIGGER; } CONTRACT_END; // Get a compatible string from s if (c & ~0x7f) ConvertToUnicode(i); switch (GetRepresentation()) { case REPRESENTATION_UNICODE: { const WCHAR *start = GetRawUnicode() + GetRawCount() - 1; if (start > i.GetUnicode()) start = i.GetUnicode(); const WCHAR *end = GetRawUnicode(); while (start >= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_ANSI: case REPRESENTATION_ASCII: { const CHAR *start = GetRawASCII() + GetRawCount() - 1; if (start > i.GetASCII()) start = i.GetASCII(); const CHAR *end = GetRawASCII(); while (start >= end) { if (*start == c) { i.Resync(this, (BYTE*) start); RETURN TRUE; } start--; } } break; case REPRESENTATION_EMPTY: break; case REPRESENTATION_UTF8: default: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Returns TRUE if this string begins with the contents of s //----------------------------------------------------------------------------- BOOL SString::BeginsWith(const SString &s) const { WRAPPER_NO_CONTRACT; return Match(Begin(), s); } //----------------------------------------------------------------------------- // Returns TRUE if this string begins with the contents of s //----------------------------------------------------------------------------- BOOL SString::BeginsWithCaseInsensitive(const SString &s) const { WRAPPER_NO_CONTRACT; return MatchCaseInsensitive(Begin(), s); } //----------------------------------------------------------------------------- // Returns TRUE if this string ends with the contents of s //----------------------------------------------------------------------------- BOOL 
SString::EndsWith(const SString &s) const { WRAPPER_NO_CONTRACT; // Need this check due to iterator arithmetic below. if (GetCount() < s.GetCount()) { return FALSE; } return Match(End() - s.GetCount(), s); } //----------------------------------------------------------------------------- // Returns TRUE if this string ends with the contents of s //----------------------------------------------------------------------------- BOOL SString::EndsWithCaseInsensitive(const SString &s) const { WRAPPER_NO_CONTRACT; // Need this check due to iterator arithmetic below. if (GetCount() < s.GetCount()) { return FALSE; } return MatchCaseInsensitive(End() - s.GetCount(), s); } //----------------------------------------------------------------------------- // Compare this string's contents to s's contents. // The comparison does not take into account localization issues like case folding. // Return 0 if equal, <0 if this < s, >0 if this > s. (same as strcmp). //----------------------------------------------------------------------------- int SString::Compare(const SString &s) const { CONTRACT(int) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T smaller; int equals = 0; int result = 0; if (GetRawCount() < source.GetRawCount()) { smaller = GetRawCount(); equals = -1; } else if (GetRawCount() > source.GetRawCount()) { smaller = source.GetRawCount(); equals = 1; } else { smaller = GetRawCount(); equals = 0; } switch (GetRepresentation()) { case REPRESENTATION_UNICODE: result = wcsncmp(GetRawUnicode(), source.GetRawUnicode(), smaller); break; case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: result = strncmp(GetRawASCII(), source.GetRawASCII(), smaller); break; case REPRESENTATION_EMPTY: result = 0; break; default: case REPRESENTATION_UTF8: UNREACHABLE(); } if (result == 0) RETURN equals; else RETURN result; } //----------------------------------------------------------------------------- // Compare this string's contents to s's contents. // Return 0 if equal, <0 if this < s, >0 if this > s. (same as strcmp). //----------------------------------------------------------------------------- int SString::CompareCaseInsensitive(const SString &s) const { CONTRACT(int) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T smaller; int equals = 0; int result = 0; if (GetRawCount() < source.GetRawCount()) { smaller = GetRawCount(); equals = -1; } else if (GetRawCount() > source.GetRawCount()) { smaller = source.GetRawCount(); equals = 1; } else { smaller = GetRawCount(); equals = 0; } switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_ANSI: result = CaseCompareHelper(GetRawUnicode(), source.GetRawUnicode(), smaller, FALSE, TRUE); break; case REPRESENTATION_ASCII: result = CaseCompareHelperA(GetRawASCII(), source.GetRawASCII(), smaller, FALSE, TRUE); break; case REPRESENTATION_EMPTY: result = 0; break; default: case REPRESENTATION_UTF8: UNREACHABLE(); } if (result == 0) RETURN equals; else RETURN result; } //----------------------------------------------------------------------------- // Compare this string's contents to s's contents. // The comparison does not take into account localization issues like case folding. // Return 1 if equal, 0 if not.
//----------------------------------------------------------------------------- BOOL SString::Equals(const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); FAULTS_UNLESS_BOTH_NORMALIZED(s, ThrowOutOfMemory()); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T count = GetRawCount(); if (count != source.GetRawCount()) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: RETURN (wcsncmp(GetRawUnicode(), source.GetRawUnicode(), count) == 0); case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: RETURN (strncmp(GetRawASCII(), source.GetRawASCII(), count) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare this string's contents case insensitively to s's contents. // Return 1 if equal, 0 if not. //----------------------------------------------------------------------------- BOOL SString::EqualsCaseInsensitive(const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); FAULTS_UNLESS_BOTH_NORMALIZED(s, ThrowOutOfMemory()); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp); COUNT_T count = GetRawCount(); if (count != source.GetRawCount()) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_ANSI: RETURN (CaseCompareHelper(GetRawUnicode(), source.GetRawUnicode(), count, FALSE, TRUE) == 0); case REPRESENTATION_ASCII: RETURN (CaseCompareHelperA(GetRawASCII(), source.GetRawASCII(), count, FALSE, TRUE) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare s's contents to the substring starting at position // The comparison does not take into account localization issues like case folding. 
// Return TRUE if equal, FALSE if not //----------------------------------------------------------------------------- BOOL SString::Match(const CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp, i); COUNT_T remaining = End() - i; COUNT_T count = source.GetRawCount(); if (remaining < count) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: RETURN (wcsncmp(i.GetUnicode(), source.GetRawUnicode(), count) == 0); case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: RETURN (strncmp(i.GetASCII(), source.GetRawASCII(), count) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare s's contents case insensitively to the substring starting at position // Return TRUE if equal, FALSE if not //----------------------------------------------------------------------------- BOOL SString::MatchCaseInsensitive(const CIterator &i, const SString &s) const { CONTRACT(BOOL) { INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); PRECONDITION(s.Check()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACT_END; StackSString temp; const SString &source = GetCompatibleString(s, temp, i); COUNT_T remaining = End() - i; COUNT_T count = source.GetRawCount(); if (remaining < count) RETURN FALSE; switch (GetRepresentation()) { case REPRESENTATION_UNICODE: case REPRESENTATION_ANSI: RETURN (CaseCompareHelper(i.GetUnicode(), source.GetRawUnicode(), count, FALSE, TRUE) == 0); case REPRESENTATION_ASCII: RETURN (CaseCompareHelperA(i.GetASCII(), source.GetRawASCII(), count, FALSE, TRUE) == 0); case REPRESENTATION_EMPTY: RETURN TRUE; default: case REPRESENTATION_UTF8: UNREACHABLE(); } RETURN FALSE; } //----------------------------------------------------------------------------- // Compare c case insensitively to the character at position // Return TRUE if equal, FALSE if not //----------------------------------------------------------------------------- BOOL SString::MatchCaseInsensitive(const CIterator &i, WCHAR c) const { SS_CONTRACT(BOOL) { GC_NOTRIGGER; INSTANCE_CHECK; PRECONDITION(CheckIteratorRange(i)); NOTHROW; } SS_CONTRACT_END; // End() will not throw here CONTRACT_VIOLATION(ThrowsViolation); if (i >= End()) SS_RETURN FALSE; WCHAR test = i[0]; SS_RETURN (test == c || ((CAN_SIMPLE_UPCASE(test) ? SIMPLE_UPCASE(test) : MapChar(test, LCMAP_UPPERCASE)) == (CAN_SIMPLE_UPCASE(c) ? SIMPLE_UPCASE(c) : MapChar(c, LCMAP_UPPERCASE)))); } //----------------------------------------------------------------------------- // Convert string to unicode lowercase using the invariant culture // Note: Please don't use it in PATH as multiple character can map to the same // lower case symbol //----------------------------------------------------------------------------- void SString::LowerCase() { SS_CONTRACT_VOID { GC_NOTRIGGER; PRECONDITION(CheckPointer(this)); SS_POSTCONDITION(CheckPointer(RETVAL)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; SUPPORTS_DAC; } SS_CONTRACT_END; ConvertToUnicode(); for (WCHAR *pwch = GetRawUnicode(); pwch < GetRawUnicode() + GetRawCount(); ++pwch) { *pwch = (CAN_SIMPLE_DOWNCASE(*pwch) ? 
SIMPLE_DOWNCASE(*pwch) : MapChar(*pwch, LCMAP_LOWERCASE)); } } //----------------------------------------------------------------------------- // Convert null-terminated string to lowercase using the invariant culture //----------------------------------------------------------------------------- //static void SString::LowerCase(__inout_z LPWSTR wszString) { SS_CONTRACT_VOID { GC_NOTRIGGER; NOTHROW; SUPPORTS_DAC; } SS_CONTRACT_END; if (wszString == NULL) { return; } for (WCHAR * pwch = wszString; *pwch != '\0'; ++pwch) { *pwch = (CAN_SIMPLE_DOWNCASE(*pwch) ? SIMPLE_DOWNCASE(*pwch) : MapChar(*pwch, LCMAP_LOWERCASE)); } } //----------------------------------------------------------------------------- // Convert string to unicode uppercase using the invariant culture // Note: Please don't use it in PATH as multiple character can map to the same // upper case symbol //----------------------------------------------------------------------------- void SString::UpperCase() { SS_CONTRACT_VOID { GC_NOTRIGGER; PRECONDITION(CheckPointer(this)); SS_POSTCONDITION(CheckPointer(RETVAL)); if (IsRepresentation(REPRESENTATION_UNICODE)) NOTHROW; else THROWS; GC_NOTRIGGER; SUPPORTS_DAC; } SS_CONTRACT_END; ConvertToUnicode(); for (WCHAR *pwch = GetRawUnicode(); pwch < GetRawUnicode() + GetRawCount(); ++pwch) { *pwch = (CAN_SIMPLE_UPCASE(*pwch) ? SIMPLE_UPCASE(*pwch) : MapChar(*pwch, LCMAP_UPPERCASE)); } } //----------------------------------------------------------------------------- // Get a const pointer to the internal buffer as an ANSI string. //----------------------------------------------------------------------------- const CHAR *SString::GetANSI(AbstractScratchBuffer &scratch) const { SS_CONTRACT(const CHAR *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } SS_CONTRACT_END; if (IsRepresentation(REPRESENTATION_ANSI)) SS_RETURN GetRawANSI(); ConvertToANSI((SString&)scratch); SS_RETURN ((SString&)scratch).GetRawANSI(); } //----------------------------------------------------------------------------- // Get a const pointer to the internal buffer as a UTF8 string. //----------------------------------------------------------------------------- const UTF8 *SString::GetUTF8(AbstractScratchBuffer &scratch) const { CONTRACT(const UTF8 *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_UTF8)) RETURN GetRawUTF8(); ConvertToUTF8((SString&)scratch); RETURN ((SString&)scratch).GetRawUTF8(); } const UTF8 *SString::GetUTF8(AbstractScratchBuffer &scratch, COUNT_T *pcbUtf8) const { CONTRACT(const UTF8 *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_UTF8)) { *pcbUtf8 = GetRawCount() + 1; RETURN GetRawUTF8(); } *pcbUtf8 = ConvertToUTF8((SString&)scratch); RETURN ((SString&)scratch).GetRawUTF8(); } //----------------------------------------------------------------------------- // Get a const pointer to the internal buffer which must already be a UTF8 string. // This avoids the need to create a scratch buffer we know will never be used. //----------------------------------------------------------------------------- const UTF8 *SString::GetUTF8NoConvert() const { CONTRACT(const UTF8 *) { INSTANCE_CHECK_NULL; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (IsRepresentation(REPRESENTATION_UTF8)) RETURN GetRawUTF8(); ThrowHR(E_INVALIDARG); } //----------------------------------------------------------------------------- // Safe version of sprintf. // Prints formatted ansi text w/ var args to this buffer. 
//----------------------------------------------------------------------------- void SString::Printf(const CHAR *format, ...) { WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); VPrintf(format, args); va_end(args); } #ifdef _DEBUG // // Check the Printf use for potential globalization bugs. %S formatting // specifier does Unicode->Ansi or Ansi->Unicode conversion using current // C-locale. This almost always means a globalization bug in the CLR codebase. // // Ideally, we would eliminate %S from all format strings. Unfortunately, // %S is so widespread in non-shipping code that such cleanup is not feasible. // static void CheckForFormatStringGlobalizationIssues(const SString &format, const SString &result) { CONTRACTL { THROWS; GC_NOTRIGGER; DEBUG_ONLY; } CONTRACTL_END; BOOL fDangerousFormat = FALSE; // Check whether the format string contains the %S formatting specifier SString::CIterator itrFormat = format.Begin(); while (*itrFormat) { if (*itrFormat++ == '%') { // <TODO>Handle the complex format strings like %blahS</TODO> if (*itrFormat++ == 'S') { fDangerousFormat = TRUE; break; } } } if (fDangerousFormat) { BOOL fNonAsciiUsed = FALSE; // Now check whether there are any non-ASCII characters in the output. // Check whether the result contains non-Ascii characters SString::CIterator itrResult = result.Begin(); while (*itrResult) { if (*itrResult++ > 127) { fNonAsciiUsed = TRUE; break; } } CONSISTENCY_CHECK_MSGF(!fNonAsciiUsed, ("Non-ASCII string was produced by %%S format specifier. This is likely a globalization bug. " "To fix this, change the format string to %%s and do the correct encoding at the Printf callsite")); } } #endif #ifndef EBADF #define EBADF 9 #endif #ifndef ENOMEM #define ENOMEM 12 #endif #ifndef ERANGE #define ERANGE 34 #endif #if defined(_MSC_VER) #undef va_copy #define va_copy(dest,src) (dest = src) #endif void SString::VPrintf(const CHAR *format, va_list args) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list ap; // sprintf gives us no means to know how many characters are written // other than guessing and trying if (GetRawCount() > 0) { // First, try to use the existing buffer va_copy(ap, args); int result = _vsnprintf_s(GetRawANSI(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { // Succeeded in writing. Now resize - Resize(result, REPRESENTATION_ANSI, PRESERVE); SString sss(Ansi, format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } } // Make a guess how long the result will be (note this will be doubled) COUNT_T guess = (COUNT_T) strlen(format)+1; if (guess < GetRawCount()) guess = GetRawCount(); if (guess < MINIMUM_GUESS) guess = MINIMUM_GUESS; while (TRUE) { // Double the previous guess - eventually we will get enough space guess *= 2; Resize(guess, REPRESENTATION_ANSI); // Clear errno to avoid false alarms errno = 0; va_copy(ap, args); int result = _vsnprintf_s(GetRawANSI(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { // Succeeded in writing. Shrink the buffer to fit exactly. Resize(result, REPRESENTATION_ANSI, PRESERVE); SString sss(Ansi, format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } if (errno==ENOMEM) { ThrowOutOfMemory(); } else if (errno!=0 && errno!=EBADF && errno!=ERANGE) { CONSISTENCY_CHECK_MSG(FALSE, "_vsnprintf_s failed. Potential globalization bug."); ThrowHR(HRESULT_FROM_WIN32(ERROR_NO_UNICODE_TRANSLATION)); } } RETURN; } void SString::Printf(const WCHAR *format, ...)
{ WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); VPrintf(format, args); va_end(args); } void SString::PPrintf(const WCHAR *format, ...) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list argItr; va_start(argItr, format); PVPrintf(format, argItr); va_end(argItr); RETURN; } void SString::VPrintf(const WCHAR *format, va_list args) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list ap; // sprintf gives us no means to know how many characters are written // other than guessing and trying if (GetRawCount() > 0) { // First, try to use the existing buffer va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { // succeeded Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } } // Make a guess how long the result will be (note this will be doubled) COUNT_T guess = (COUNT_T) wcslen(format)+1; if (guess < GetRawCount()) guess = GetRawCount(); if (guess < MINIMUM_GUESS) guess = MINIMUM_GUESS; while (TRUE) { // Double the previous guess - eventually we will get enough space guess *= 2; Resize(guess, REPRESENTATION_UNICODE); // Clear errno to avoid false alarms errno = 0; va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } if (errno==ENOMEM) { ThrowOutOfMemory(); } else if (errno!=0 && errno!=EBADF && errno!=ERANGE) { CONSISTENCY_CHECK_MSG(FALSE, "_vsnwprintf_s failed. Potential globalization bug."); ThrowHR(HRESULT_FROM_WIN32(ERROR_NO_UNICODE_TRANSLATION)); } } RETURN; } void SString::PVPrintf(const WCHAR *format, va_list args) { CONTRACT_VOID { INSTANCE_CHECK; PRECONDITION(CheckPointer(format)); THROWS; GC_NOTRIGGER; } CONTRACT_END; va_list ap; // sprintf gives us no means to know how many characters are written // other than guessing and trying if (GetRawCount() > 0) { // First, try to use the existing buffer va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { // succeeded Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } } // Make a guess how long the result will be (note this will be doubled) COUNT_T guess = (COUNT_T) wcslen(format)+1; if (guess < GetRawCount()) guess = GetRawCount(); if (guess < MINIMUM_GUESS) guess = MINIMUM_GUESS; while (TRUE) { // Double the previous guess - eventually we will get enough space guess *= 2; Resize(guess, REPRESENTATION_UNICODE, DONT_PRESERVE); // Clear errno to avoid false alarms errno = 0; va_copy(ap, args); int result = _vsnwprintf_s(GetRawUnicode(), GetRawCount()+1, _TRUNCATE, format, ap); va_end(ap); if (result >= 0) { Resize(result, REPRESENTATION_UNICODE, PRESERVE); SString sss(format); INDEBUG(CheckForFormatStringGlobalizationIssues(sss, *this)); RETURN; } if (errno==ENOMEM) { ThrowOutOfMemory(); } else if (errno!=0 && errno!=EBADF && errno!=ERANGE) { CONSISTENCY_CHECK_MSG(FALSE, "_vsnwprintf_s failed. Potential globalization bug."); ThrowHR(HRESULT_FROM_WIN32(ERROR_NO_UNICODE_TRANSLATION)); } } RETURN; } void SString::AppendPrintf(const CHAR *format, ...) 
{ WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); AppendVPrintf(format, args); va_end(args); } void SString::AppendVPrintf(const CHAR *format, va_list args) { WRAPPER_NO_CONTRACT; StackSString s; s.VPrintf(format, args); Append(s); } void SString::AppendPrintf(const WCHAR *format, ...) { WRAPPER_NO_CONTRACT; va_list args; va_start(args, format); AppendVPrintf(format, args); va_end(args); } void SString::AppendVPrintf(const WCHAR *format, va_list args) { WRAPPER_NO_CONTRACT; StackSString s; s.VPrintf(format, args); Append(s); } //---------------------------------------------------------------------------- // LoadResource - moved to sstring_com.cpp //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- // Format the message and put the contents in this string //---------------------------------------------------------------------------- BOOL SString::FormatMessage(DWORD dwFlags, LPCVOID lpSource, DWORD dwMessageId, DWORD dwLanguageId, const SString &arg1, const SString &arg2, const SString &arg3, const SString &arg4, const SString &arg5, const SString &arg6, const SString &arg7, const SString &arg8, const SString &arg9, const SString &arg10) { CONTRACT(BOOL) { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; } CONTRACT_END; const WCHAR *args[] = {arg1.GetUnicode(), arg2.GetUnicode(), arg3.GetUnicode(), arg4.GetUnicode(), arg5.GetUnicode(), arg6.GetUnicode(), arg7.GetUnicode(), arg8.GetUnicode(), arg9.GetUnicode(), arg10.GetUnicode()}; if (GetRawCount() > 0) { // First, try to use our existing buffer to hold the result. Resize(GetRawCount(), REPRESENTATION_UNICODE); DWORD result = ::WszFormatMessage(dwFlags | FORMAT_MESSAGE_ARGUMENT_ARRAY, lpSource, dwMessageId, dwLanguageId, GetRawUnicode(), GetRawCount()+1, (va_list*)args); // Although we cannot directly detect truncation, we can tell if we // used up all the space (in which case we will assume truncation.) if (result != 0 && result < GetRawCount()) { if (GetRawUnicode()[result-1] == W(' ')) { GetRawUnicode()[result-1] = W('\0'); result -= 1; } Resize(result, REPRESENTATION_UNICODE, PRESERVE); RETURN TRUE; } } // We don't have enough space in our buffer, do dynamic allocation. 
LocalAllocHolder<WCHAR> string; DWORD result = ::WszFormatMessage(dwFlags | FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_ARGUMENT_ARRAY, lpSource, dwMessageId, dwLanguageId, (LPWSTR)(LPWSTR*)&string, 0, (va_list*)args); if (result == 0) RETURN FALSE; else { if (string[result-1] == W(' ')) string[result-1] = W('\0'); Set(string); RETURN TRUE; } } #if 1 //---------------------------------------------------------------------------- // Helper //---------------------------------------------------------------------------- // @todo -this should be removed and placed outside of SString void SString::MakeFullNamespacePath(const SString &nameSpace, const SString &name) { CONTRACT_VOID { INSTANCE_CHECK; THROWS; GC_NOTRIGGER; } CONTRACT_END; if (nameSpace.GetRepresentation() == REPRESENTATION_UTF8 && name.GetRepresentation() == REPRESENTATION_UTF8) { const UTF8 *ns = nameSpace.GetRawUTF8(); const UTF8 *n = name.GetRawUTF8(); COUNT_T count = ns::GetFullLength(ns, n)-1; Resize(count, REPRESENTATION_UTF8); if (count > 0) ns::MakePath(GetRawUTF8(), count+1, ns, n); } else { const WCHAR *ns = nameSpace; const WCHAR *n = name; COUNT_T count = ns::GetFullLength(ns, n)-1; Resize(count, REPRESENTATION_UNICODE); if (count > 0) ns::MakePath(GetRawUnicode(), count+1, ns, n); } RETURN; } #endif //---------------------------------------------------------------------------- // Private helper. // Check to see if the string fits the suggested representation //---------------------------------------------------------------------------- BOOL SString::IsRepresentation(Representation representation) const { CONTRACT(BOOL) { PRECONDITION(CheckRepresentation(representation)); NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACT_END; Representation currentRepresentation = GetRepresentation(); // If representations are the same, cool. if (currentRepresentation == representation) RETURN TRUE; // If we have an empty representation, we match everything if (currentRepresentation == REPRESENTATION_EMPTY) RETURN TRUE; // If we're a 1 byte charset, there are some more chances to match if (currentRepresentation != REPRESENTATION_UNICODE && representation != REPRESENTATION_UNICODE) { // If we're ASCII, we can be any 1 byte rep if (currentRepresentation == REPRESENTATION_ASCII) RETURN TRUE; // We really want to be ASCII - scan to see if we qualify if (ScanASCII()) RETURN TRUE; } // Sorry, must convert. RETURN FALSE; } //---------------------------------------------------------------------------- // Private helper. // Get the contents of the given string in a form which is compatible with our // string (and is in a fixed character set.) Updates the given iterator // if necessary to keep it in sync. //---------------------------------------------------------------------------- const SString &SString::GetCompatibleString(const SString &s, SString &scratch, const CIterator &i) const { CONTRACTL { PRECONDITION(s.Check()); PRECONDITION(scratch.Check()); PRECONDITION(scratch.CheckEmpty()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACTL_END; // Since we have an iterator, we should be fixed size already CONSISTENCY_CHECK(IsFixedSize()); switch (GetRepresentation()) { case REPRESENTATION_EMPTY: return s; case REPRESENTATION_ASCII: if (s.IsRepresentation(REPRESENTATION_ASCII)) return s; // We can't in general convert to ASCII, so try unicode. 
ConvertToUnicode(i); FALLTHROUGH; case REPRESENTATION_UNICODE: if (s.IsRepresentation(REPRESENTATION_UNICODE)) return s; // @todo: we could convert s to unicode - is that a good policy???? s.ConvertToUnicode(scratch); return scratch; case REPRESENTATION_UTF8: case REPRESENTATION_ANSI: // These should all be impossible since we have an CIterator on us. default: UNREACHABLE_MSG("Unexpected string representation"); } return s; } //---------------------------------------------------------------------------- // Private helper. // Get the contents of the given string in a form which is compatible with our // string (and is in a fixed character set.) // May convert our string to unicode. //---------------------------------------------------------------------------- const SString &SString::GetCompatibleString(const SString &s, SString &scratch) const { CONTRACTL { PRECONDITION(s.Check()); PRECONDITION(scratch.Check()); PRECONDITION(scratch.CheckEmpty()); THROWS_UNLESS_BOTH_NORMALIZED(s); GC_NOTRIGGER; } CONTRACTL_END; // First, make sure we have a fixed size. ConvertToFixed(); switch (GetRepresentation()) { case REPRESENTATION_EMPTY: return s; case REPRESENTATION_ANSI: if (s.IsRepresentation(REPRESENTATION_ANSI)) return s; s.ConvertToANSI(scratch); return scratch; case REPRESENTATION_ASCII: if (s.IsRepresentation(REPRESENTATION_ASCII)) return s; // We can't in general convert to ASCII, so try unicode. ConvertToUnicode(); FALLTHROUGH; case REPRESENTATION_UNICODE: if (s.IsRepresentation(REPRESENTATION_UNICODE)) return s; // @todo: we could convert s to unicode in place - is that a good policy???? s.ConvertToUnicode(scratch); return scratch; case REPRESENTATION_UTF8: default: UNREACHABLE(); } return s; } //---------------------------------------------------------------------------- // Private helper. // If we have a 1 byte representation, scan the buffer to see if we can gain // some conversion flexibility by labelling it ASCII //---------------------------------------------------------------------------- BOOL SString::ScanASCII() const { CONTRACT(BOOL) { POSTCONDITION(IsRepresentation(REPRESENTATION_ASCII) || IsASCIIScanned()); NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC; } CONTRACT_END; if (!IsASCIIScanned()) { const CHAR *c = GetRawANSI(); const CHAR *cEnd = c + GetRawCount(); while (c < cEnd) { if (*c & 0x80) break; c++; } if (c == cEnd) { const_cast<SString *>(this)->SetRepresentation(REPRESENTATION_ASCII); RETURN TRUE; } else const_cast<SString *>(this)->SetASCIIScanned(); } RETURN FALSE; } //---------------------------------------------------------------------------- // Private helper. // Resize updates the geometry of the string and ensures that // the space can be written to. // count - number of characters (not including null) to hold // preserve - if we realloc, do we copy data from old to new? 
//---------------------------------------------------------------------------- void SString::Resize(COUNT_T count, SString::Representation representation, Preserve preserve) { CONTRACT_VOID { PRECONDITION(CountToSize(count) >= count); POSTCONDITION(IsRepresentation(representation)); POSTCONDITION(GetRawCount() == count); if (count == 0) NOTHROW; else THROWS; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; // If we are resizing to zero, Clear is more efficient if (count == 0) { Clear(); } else { SetRepresentation(representation); COUNT_T size = CountToSize(count); // detect overflow if (size < count) ThrowOutOfMemory(); ClearNormalized(); SBuffer::Resize(size, preserve); if (IsImmutable()) EnsureMutable(); NullTerminate(); } RETURN; } //----------------------------------------------------------------------------- // This is essentially a specialized version of the above for size 0 //----------------------------------------------------------------------------- void SString::Clear() { CONTRACT_VOID { INSTANCE_CHECK; POSTCONDITION(IsEmpty()); NOTHROW; GC_NOTRIGGER; SUPPORTS_DAC_HOST_ONLY; } CONTRACT_END; SetRepresentation(REPRESENTATION_EMPTY); if (IsImmutable()) { // Use shared empty string rather than allocating a new buffer SBuffer::SetImmutable(s_EmptyBuffer, sizeof(s_EmptyBuffer)); } else { // Leave allocated buffer for future growth SBuffer::TweakSize(sizeof(WCHAR)); GetRawUnicode()[0] = 0; } RETURN; } #ifdef DACCESS_COMPILE //--------------------------------------------------------------------------------------- // // Return a pointer to the raw buffer // // Returns: // A pointer to the raw string buffer. // void * SString::DacGetRawContent() const { if (IsEmpty()) { return NULL; } switch (GetRepresentation()) { case REPRESENTATION_EMPTY: return NULL; case REPRESENTATION_UNICODE: case REPRESENTATION_UTF8: case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: // Note: no need to call DacInstantiateString because we know the exact length already. return SBuffer::DacGetRawContent(); default: DacNotImpl(); return NULL; } } //--------------------------------------------------------------------------------------- // // Return a pointer to the raw buffer as a pointer to a unicode string. Does not // do conversion, and thus requires that the representation already be in unicode. // // Returns: // A pointer to the raw string buffer as a unicode string. // const WCHAR * SString::DacGetRawUnicode() const { if (IsEmpty() || (GetRepresentation() == REPRESENTATION_EMPTY)) { return W(""); } if (GetRepresentation() != REPRESENTATION_UNICODE) { DacError(E_UNEXPECTED); } HRESULT status = S_OK; WCHAR* wszBuf = NULL; EX_TRY { wszBuf = static_cast<WCHAR*>(SBuffer::DacGetRawContent()); } EX_CATCH_HRESULT(status); if (SUCCEEDED(status)) { return wszBuf; } else { return NULL; } } //--------------------------------------------------------------------------------------- // // Copy the string from the target into the provided buffer, converting to unicode if necessary // // Arguments: // cBufChars - size of pBuffer in count of unicode characters. // pBuffer - a buffer of cBufChars unicode chars. // pcNeedChars - space to store the number of unicode chars in the SString. // // Returns: // true if successful - and buffer is filled with the unicode representation of // the string. // false if unsuccessful. 
// bool SString::DacGetUnicode(COUNT_T cBufChars, _Inout_updates_z_(cBufChars) WCHAR * pBuffer, COUNT_T * pcNeedChars) const { SUPPORTS_DAC; PVOID pContent = NULL; int iPage = CP_ACP; if (IsEmpty() || (GetRepresentation() == REPRESENTATION_EMPTY)) { if (pcNeedChars) { *pcNeedChars = 1; } if (pBuffer && cBufChars) { pBuffer[0] = 0; } return true; } HRESULT status = S_OK; EX_TRY { pContent = SBuffer::DacGetRawContent(); } EX_CATCH_HRESULT(status); if (SUCCEEDED(status) && pContent != NULL) { switch (GetRepresentation()) { case REPRESENTATION_UNICODE: if (pcNeedChars) { *pcNeedChars = GetCount() + 1; } if (pBuffer && cBufChars) { if (cBufChars > GetCount() + 1) { cBufChars = GetCount() + 1; } memcpy(pBuffer, pContent, cBufChars * sizeof(*pBuffer)); pBuffer[cBufChars - 1] = 0; } return true; case REPRESENTATION_UTF8: iPage = CP_UTF8; FALLTHROUGH; case REPRESENTATION_ASCII: case REPRESENTATION_ANSI: // iPage defaults to CP_ACP. if (pcNeedChars) { *pcNeedChars = WszMultiByteToWideChar(iPage, 0, reinterpret_cast<PSTR>(pContent), -1, NULL, 0); } if (pBuffer && cBufChars) { if (!WszMultiByteToWideChar(iPage, 0, reinterpret_cast<PSTR>(pContent), -1, pBuffer, cBufChars)) { return false; } } return true; default: DacNotImpl(); return false; } } return false; } #endif //DACCESS_COMPILE
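The grow-and-retry pattern in the VPrintf implementations above (try the current buffer, double a guess until the formatter succeeds, then shrink to the exact size) is independent of SString itself. The sketch below reimplements just that pattern with plain vsnprintf and std::string; FormatWithRetry is a hypothetical helper written for illustration, not part of the runtime.

#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Minimal sketch of SString::VPrintf's grow-and-retry loop.
static std::string FormatWithRetry(const char* format, va_list args)
{
    // Guess based on the format length, as VPrintf does; the loop doubles it.
    size_t guess = strlen(format) + 1;
    if (guess < 64)
        guess = 64;
    std::vector<char> buf;
    for (;;)
    {
        guess *= 2;
        buf.resize(guess);
        va_list ap;
        va_copy(ap, args);
        int result = vsnprintf(buf.data(), buf.size(), format, ap);
        va_end(ap);
        if (result >= 0 && (size_t)result < buf.size())
            return std::string(buf.data(), (size_t)result); // "shrink to fit exactly"
        // C99 vsnprintf actually reports the needed length on truncation, so a
        // single retry would suffice; doubling mirrors the code above, which
        // only learns "didn't fit" from the _TRUNCATE failure.
    }
}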
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
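Read as a decision procedure, the description above lists three independent reasons to reject a fast tail call. The sketch below is illustrative pseudologic only; CallSiteInfo and its fields are hypothetical stand-ins, not the JIT code changed by this PR.

// Illustrative only; every name here is a hypothetical stand-in.
struct CallSiteInfo
{
    bool hasSplitStructArg;           // struct argument split across registers and stack
    bool usesNonStandardConvention;   // callee uses a non-standard calling convention
    bool overwritesLiveIncomingStack; // outgoing args would clobber incoming stack still needed
};

bool CanUseFastTailCall(const CallSiteInfo& call)
{
    return !call.hasSplitStructArg
        && !call.usesNonStandardConvention
        && !call.overwritesLiveIncomingStack;
}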
./src/coreclr/debug/createdump/crashinfomac.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "createdump.h" bool CrashInfo::Initialize() { m_ppid = 0; m_tgid = 0; kern_return_t result = ::task_for_pid(mach_task_self(), m_pid, &m_task); if (result != KERN_SUCCESS) { fprintf(stderr, "task_for_pid(%d) FAILED %x %s\n", m_pid, result, mach_error_string(result)); return false; } return true; } void CrashInfo::CleanupAndResumeProcess() { // Resume all the threads suspended in EnumerateAndSuspendThreads ::task_resume(Task()); } // // Suspends all the threads and creating a list of them. Should be the before gathering any info about the process. // bool CrashInfo::EnumerateAndSuspendThreads() { thread_act_port_array_t threadList; mach_msg_type_number_t threadCount; kern_return_t result = ::task_suspend(Task()); if (result != KERN_SUCCESS) { fprintf(stderr, "task_suspend(%d) FAILED %x %s\n", m_pid, result, mach_error_string(result)); return false; } result = ::task_threads(Task(), &threadList, &threadCount); if (result != KERN_SUCCESS) { fprintf(stderr, "task_threads(%d) FAILED %x %s\n", m_pid, result, mach_error_string(result)); return false; } for (int i = 0; i < threadCount; i++) { thread_identifier_info_data_t tident; mach_msg_type_number_t tident_count = THREAD_IDENTIFIER_INFO_COUNT; int tid; result = ::thread_info(threadList[i], THREAD_IDENTIFIER_INFO, (thread_info_t)&tident, &tident_count); if (result != KERN_SUCCESS) { TRACE("%d thread_info(%x) FAILED %x %s\n", i, threadList[i], result, mach_error_string(result)); tid = (int)threadList[i]; } else { tid = tident.thread_id; } // Add to the list of threads ThreadInfo* thread = new ThreadInfo(*this, tid, threadList[i]); m_threads.push_back(thread); } return true; } uint32_t ConvertProtectionFlags(vm_prot_t prot) { uint32_t regionFlags = 0; if (prot & VM_PROT_READ) { regionFlags |= PF_R; } if (prot & VM_PROT_WRITE) { regionFlags |= PF_W; } if (prot & VM_PROT_EXECUTE) { regionFlags |= PF_X; } return regionFlags; } bool CrashInfo::EnumerateMemoryRegions() { vm_region_submap_info_data_64_t info; mach_vm_address_t address = 1; mach_vm_size_t size = 0; uint32_t depth = 0; // First enumerate and add all the regions while (address > 0 && address < MACH_VM_MAX_ADDRESS) { mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; kern_return_t result = ::mach_vm_region_recurse(Task(), &address, &size, &depth, (vm_region_recurse_info_t)&info, &count); if (result != KERN_SUCCESS) { // Iteration can be ended on a KERN_INVALID_ADDRESS // Allow other kernel errors to continue too so we can get at least part of a dump TRACE("mach_vm_region_recurse for address %016llx %08llx FAILED %x %s\n", address, size, result, mach_error_string(result)); break; } TRACE_VERBOSE("%016llx - %016llx (%06llx) %08llx %s %d %d %d %c%c%c %02x\n", address, address + size, size / PAGE_SIZE, info.offset, info.is_submap ? "sub" : " ", info.user_wired_count, info.share_mode, depth, (info.protection & VM_PROT_READ) ? 'r' : '-', (info.protection & VM_PROT_WRITE) ? 'w' : '-', (info.protection & VM_PROT_EXECUTE) ? 
'x' : '-', info.protection); if (info.is_submap) { depth++; } else { if ((info.protection & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) != 0) { MemoryRegion memoryRegion(ConvertProtectionFlags(info.protection), address, address + size, info.offset); m_allMemoryRegions.insert(memoryRegion); } address += size; } } // Now find all the modules and add them to the module list for (const MemoryRegion& region : m_allMemoryRegions) { bool found; if (!TryFindDyLinker(region.StartAddress(), region.Size(), &found)) { return false; } if (found) { break; } } // Filter out the module regions from the memory regions gathered for (const MemoryRegion& region : m_allMemoryRegions) { std::set<MemoryRegion>::iterator found = m_moduleMappings.find(region); if (found == m_moduleMappings.end()) { m_otherMappings.insert(region); } else { // Skip any region that is fully contained in a module region if (!found->Contains(region)) { TRACE("Region: "); region.Trace(); // Now add all the gaps in "region" left by the module regions uint64_t previousEndAddress = region.StartAddress(); for (; found != m_moduleMappings.end(); found++) { if (region.Contains(*found)) { MemoryRegion gap(region.Flags(), previousEndAddress, found->StartAddress(), region.Offset()); if (gap.Size() > 0) { TRACE(" Gap: "); gap.Trace(); m_otherMappings.insert(gap); } previousEndAddress = found->EndAddress(); } } MemoryRegion endgap(region.Flags(), previousEndAddress, region.EndAddress(), region.Offset()); if (endgap.Size() > 0) { TRACE(" EndGap:"); endgap.Trace(); m_otherMappings.insert(endgap); } } } } return true; } bool CrashInfo::TryFindDyLinker(mach_vm_address_t address, mach_vm_size_t size, bool* found) { bool result = true; *found = false; if (size > sizeof(mach_header_64)) { mach_header_64 header; size_t read = 0; if (ReadProcessMemory((void*)address, &header, sizeof(mach_header_64), &read)) { if (header.magic == MH_MAGIC_64) { TRACE("TryFindDyLinker: found module header at %016llx %08llx ncmds %d sizeofcmds %08x type %02x\n", address, size, header.ncmds, header.sizeofcmds, header.filetype); if (header.filetype == MH_DYLINKER) { TRACE("TryFindDyLinker: found dylinker\n"); *found = true; // Enumerate all the modules in dyld's image cache. VisitModule is called for every module found. result = EnumerateModules(address, &header); } } } else { TRACE("TryFindDyLinker: ReadProcessMemory header at %p %d FAILED\n", address, read); } } return result; } void CrashInfo::VisitModule(MachOModule& module) { AddModuleInfo(false, module.BaseAddress(), nullptr, module.Name()); // Get the process name from the executable module file type if (m_name.empty() && module.Header().filetype == MH_EXECUTE) { m_name = GetFileName(module.Name()); } // Save the runtime module path if (m_coreclrPath.empty()) { size_t last = module.Name().rfind(DIRECTORY_SEPARATOR_STR_A MAKEDLLNAME_A("coreclr")); if (last != std::string::npos) { m_coreclrPath = module.Name().substr(0, last + 1); uint64_t symbolOffset; if (!module.TryLookupSymbol("g_dacTable", &symbolOffset)) { TRACE("TryLookupSymbol(g_dacTable) FAILED\n"); } } } // VisitSegment is called for each segment of the module module.EnumerateSegments(); } void CrashInfo::VisitSegment(MachOModule& module, const segment_command_64& segment) { if (segment.initprot != 0) { // The __LINKEDIT segment contains the raw data used by dynamic linker, such as symbol, // string and relocation table entries. 
More importantly, the same __LINKEDIT segment // can be shared by multiple modules so we need to skip them to prevent overlapping // module regions. if (strcmp(segment.segname, SEG_LINKEDIT) != 0) { uint32_t regionFlags = ConvertProtectionFlags(segment.initprot); uint64_t offset = segment.fileoff; uint64_t start = segment.vmaddr + module.LoadBias(); uint64_t end = start + segment.vmsize; // Add this module segment to the set used by the thread unwinding to lookup the module base address for an ip. AddModuleAddressRange(start, end, module.BaseAddress()); // Round to page boundary start = start & PAGE_MASK; _ASSERTE(start > 0); // Round up to page boundary end = (end + (PAGE_SIZE - 1)) & PAGE_MASK; _ASSERTE(end > 0); // Add module memory region if not already on the list MemoryRegion moduleRegion(regionFlags, start, end, offset); const auto& found = m_moduleMappings.find(moduleRegion); if (found == m_moduleMappings.end()) { if (g_diagnosticsVerbose) { TRACE_VERBOSE("VisitSegment: "); moduleRegion.Trace(); } // Add this module segment to the module mappings list m_moduleMappings.insert(moduleRegion); } else { TRACE("VisitSegment: WARNING: "); moduleRegion.Trace(); TRACE(" is overlapping: "); found->Trace(); } } } } void CrashInfo::VisitSection(MachOModule& module, const section_64& section) { // Add the unwind and eh frame info to the dump if ((strcmp(section.sectname, "__unwind_info") == 0) || (strcmp(section.sectname, "__eh_frame") == 0)) { InsertMemoryRegion(section.addr + module.LoadBias(), section.size); } } // // Get the memory region flags for a start address // uint32_t CrashInfo::GetMemoryRegionFlags(uint64_t start) { MemoryRegion search(0, start, start + PAGE_SIZE); const MemoryRegion* region = SearchMemoryRegions(m_allMemoryRegions, search); if (region != nullptr) { return region->Flags(); } TRACE("GetMemoryRegionFlags: %016llx FAILED\n", start); return PF_R | PF_W | PF_X; } // // Read raw memory // bool CrashInfo::ReadProcessMemory(void* address, void* buffer, size_t size, size_t* read) { assert(buffer != nullptr); assert(read != nullptr); // vm_read_overwrite usually requires that the address be page-aligned // and the size be a multiple of the page size. We can't differentiate // between the cases in which that's required and those in which it // isn't, so we do it all the time. 
vm_address_t addressAligned = (vm_address_t)address & ~(PAGE_SIZE - 1); ssize_t offset = (ssize_t)address & (PAGE_SIZE - 1); char *data = (char*)alloca(PAGE_SIZE); ssize_t numberOfBytesRead = 0; ssize_t bytesLeft = size; while (bytesLeft > 0) { vm_size_t bytesRead = PAGE_SIZE; kern_return_t result = ::vm_read_overwrite(Task(), addressAligned, PAGE_SIZE, (vm_address_t)data, &bytesRead); if (result != KERN_SUCCESS || bytesRead != PAGE_SIZE) { TRACE_VERBOSE("ReadProcessMemory(%p %d): vm_read_overwrite failed bytesLeft %d bytesRead %d from %p: %x %s\n", address, size, bytesLeft, bytesRead, (void*)addressAligned, result, mach_error_string(result)); break; } ssize_t bytesToCopy = PAGE_SIZE - offset; if (bytesToCopy > bytesLeft) { bytesToCopy = bytesLeft; } memcpy((LPSTR)buffer + numberOfBytesRead, data + offset, bytesToCopy); addressAligned = addressAligned + PAGE_SIZE; numberOfBytesRead += bytesToCopy; bytesLeft -= bytesToCopy; offset = 0; } *read = numberOfBytesRead; return size == 0 || numberOfBytesRead > 0; } const struct dyld_all_image_infos* g_image_infos = nullptr; void ModuleInfo::LoadModule() { if (m_module == nullptr) { m_module = dlopen(m_moduleName.c_str(), RTLD_LAZY); if (m_module != nullptr) { if (g_image_infos == nullptr) { struct task_dyld_info dyld_info; mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT; kern_return_t result = task_info(mach_task_self_, TASK_DYLD_INFO, (task_info_t)&dyld_info, &count); if (result == KERN_SUCCESS) { g_image_infos = (const struct dyld_all_image_infos*)dyld_info.all_image_info_addr; } } if (g_image_infos != nullptr) { for (int i = 0; i < g_image_infos->infoArrayCount; ++i) { const struct dyld_image_info* image = g_image_infos->infoArray + i; if (strcasecmp(image->imageFilePath, m_moduleName.c_str()) == 0) { m_localBaseAddress = (uint64_t)image->imageLoadAddress; break; } } } } } }
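One detail worth isolating from EnumerateMemoryRegions above is the gap computation: module mappings are subtracted from a larger region, and only the uncovered gaps are kept as "other" mappings. Below is a minimal standalone sketch of that interval subtraction; Interval is a hypothetical stand-in for MemoryRegion.

#include <cstdint>
#include <set>
#include <vector>

struct Interval
{
    uint64_t start, end;
    bool operator<(const Interval& o) const { return start < o.start; }
};

// Given one region and the sorted module sub-regions, emit the uncovered gaps.
std::vector<Interval> GapsLeftByModules(const Interval& region, const std::set<Interval>& modules)
{
    std::vector<Interval> gaps;
    uint64_t prevEnd = region.start;
    for (const Interval& m : modules)
    {
        if (m.start >= region.start && m.end <= region.end) // fully contained, like region.Contains(*found)
        {
            if (m.start > prevEnd)
                gaps.push_back({ prevEnd, m.start }); // gap before this module
            prevEnd = m.end;
        }
    }
    if (prevEnd < region.end)
        gaps.push_back({ prevEnd, region.end }); // trailing "EndGap"
    return gaps;
}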
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "createdump.h" bool CrashInfo::Initialize() { m_ppid = 0; m_tgid = 0; kern_return_t result = ::task_for_pid(mach_task_self(), m_pid, &m_task); if (result != KERN_SUCCESS) { fprintf(stderr, "task_for_pid(%d) FAILED %x %s\n", m_pid, result, mach_error_string(result)); return false; } return true; } void CrashInfo::CleanupAndResumeProcess() { // Resume all the threads suspended in EnumerateAndSuspendThreads ::task_resume(Task()); } // // Suspends all the threads and creating a list of them. Should be the before gathering any info about the process. // bool CrashInfo::EnumerateAndSuspendThreads() { thread_act_port_array_t threadList; mach_msg_type_number_t threadCount; kern_return_t result = ::task_suspend(Task()); if (result != KERN_SUCCESS) { fprintf(stderr, "task_suspend(%d) FAILED %x %s\n", m_pid, result, mach_error_string(result)); return false; } result = ::task_threads(Task(), &threadList, &threadCount); if (result != KERN_SUCCESS) { fprintf(stderr, "task_threads(%d) FAILED %x %s\n", m_pid, result, mach_error_string(result)); return false; } for (int i = 0; i < threadCount; i++) { thread_identifier_info_data_t tident; mach_msg_type_number_t tident_count = THREAD_IDENTIFIER_INFO_COUNT; int tid; result = ::thread_info(threadList[i], THREAD_IDENTIFIER_INFO, (thread_info_t)&tident, &tident_count); if (result != KERN_SUCCESS) { TRACE("%d thread_info(%x) FAILED %x %s\n", i, threadList[i], result, mach_error_string(result)); tid = (int)threadList[i]; } else { tid = tident.thread_id; } // Add to the list of threads ThreadInfo* thread = new ThreadInfo(*this, tid, threadList[i]); m_threads.push_back(thread); } return true; } uint32_t ConvertProtectionFlags(vm_prot_t prot) { uint32_t regionFlags = 0; if (prot & VM_PROT_READ) { regionFlags |= PF_R; } if (prot & VM_PROT_WRITE) { regionFlags |= PF_W; } if (prot & VM_PROT_EXECUTE) { regionFlags |= PF_X; } return regionFlags; } bool CrashInfo::EnumerateMemoryRegions() { vm_region_submap_info_data_64_t info; mach_vm_address_t address = 1; mach_vm_size_t size = 0; uint32_t depth = 0; // First enumerate and add all the regions while (address > 0 && address < MACH_VM_MAX_ADDRESS) { mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64; kern_return_t result = ::mach_vm_region_recurse(Task(), &address, &size, &depth, (vm_region_recurse_info_t)&info, &count); if (result != KERN_SUCCESS) { // Iteration can be ended on a KERN_INVALID_ADDRESS // Allow other kernel errors to continue too so we can get at least part of a dump TRACE("mach_vm_region_recurse for address %016llx %08llx FAILED %x %s\n", address, size, result, mach_error_string(result)); break; } TRACE_VERBOSE("%016llx - %016llx (%06llx) %08llx %s %d %d %d %c%c%c %02x\n", address, address + size, size / PAGE_SIZE, info.offset, info.is_submap ? "sub" : " ", info.user_wired_count, info.share_mode, depth, (info.protection & VM_PROT_READ) ? 'r' : '-', (info.protection & VM_PROT_WRITE) ? 'w' : '-', (info.protection & VM_PROT_EXECUTE) ? 
'x' : '-', info.protection); if (info.is_submap) { depth++; } else { if ((info.protection & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) != 0) { MemoryRegion memoryRegion(ConvertProtectionFlags(info.protection), address, address + size, info.offset); m_allMemoryRegions.insert(memoryRegion); } address += size; } } // Now find all the modules and add them to the module list for (const MemoryRegion& region : m_allMemoryRegions) { bool found; if (!TryFindDyLinker(region.StartAddress(), region.Size(), &found)) { return false; } if (found) { break; } } // Filter out the module regions from the memory regions gathered for (const MemoryRegion& region : m_allMemoryRegions) { std::set<MemoryRegion>::iterator found = m_moduleMappings.find(region); if (found == m_moduleMappings.end()) { m_otherMappings.insert(region); } else { // Skip any region that is fully contained in a module region if (!found->Contains(region)) { TRACE("Region: "); region.Trace(); // Now add all the gaps in "region" left by the module regions uint64_t previousEndAddress = region.StartAddress(); for (; found != m_moduleMappings.end(); found++) { if (region.Contains(*found)) { MemoryRegion gap(region.Flags(), previousEndAddress, found->StartAddress(), region.Offset()); if (gap.Size() > 0) { TRACE(" Gap: "); gap.Trace(); m_otherMappings.insert(gap); } previousEndAddress = found->EndAddress(); } } MemoryRegion endgap(region.Flags(), previousEndAddress, region.EndAddress(), region.Offset()); if (endgap.Size() > 0) { TRACE(" EndGap:"); endgap.Trace(); m_otherMappings.insert(endgap); } } } } return true; } bool CrashInfo::TryFindDyLinker(mach_vm_address_t address, mach_vm_size_t size, bool* found) { bool result = true; *found = false; if (size > sizeof(mach_header_64)) { mach_header_64 header; size_t read = 0; if (ReadProcessMemory((void*)address, &header, sizeof(mach_header_64), &read)) { if (header.magic == MH_MAGIC_64) { TRACE("TryFindDyLinker: found module header at %016llx %08llx ncmds %d sizeofcmds %08x type %02x\n", address, size, header.ncmds, header.sizeofcmds, header.filetype); if (header.filetype == MH_DYLINKER) { TRACE("TryFindDyLinker: found dylinker\n"); *found = true; // Enumerate all the modules in dyld's image cache. VisitModule is called for every module found. result = EnumerateModules(address, &header); } } } else { TRACE("TryFindDyLinker: ReadProcessMemory header at %p %d FAILED\n", address, read); } } return result; } void CrashInfo::VisitModule(MachOModule& module) { AddModuleInfo(false, module.BaseAddress(), nullptr, module.Name()); // Get the process name from the executable module file type if (m_name.empty() && module.Header().filetype == MH_EXECUTE) { m_name = GetFileName(module.Name()); } // Save the runtime module path if (m_coreclrPath.empty()) { size_t last = module.Name().rfind(DIRECTORY_SEPARATOR_STR_A MAKEDLLNAME_A("coreclr")); if (last != std::string::npos) { m_coreclrPath = module.Name().substr(0, last + 1); uint64_t symbolOffset; if (!module.TryLookupSymbol("g_dacTable", &symbolOffset)) { TRACE("TryLookupSymbol(g_dacTable) FAILED\n"); } } } // VisitSegment is called for each segment of the module module.EnumerateSegments(); } void CrashInfo::VisitSegment(MachOModule& module, const segment_command_64& segment) { if (segment.initprot != 0) { // The __LINKEDIT segment contains the raw data used by dynamic linker, such as symbol, // string and relocation table entries. 
More importantly, the same __LINKEDIT segment // can be shared by multiple modules so we need to skip them to prevent overlapping // module regions. if (strcmp(segment.segname, SEG_LINKEDIT) != 0) { uint32_t regionFlags = ConvertProtectionFlags(segment.initprot); uint64_t offset = segment.fileoff; uint64_t start = segment.vmaddr + module.LoadBias(); uint64_t end = start + segment.vmsize; // Add this module segment to the set used by the thread unwinding to lookup the module base address for an ip. AddModuleAddressRange(start, end, module.BaseAddress()); // Round to page boundary start = start & PAGE_MASK; _ASSERTE(start > 0); // Round up to page boundary end = (end + (PAGE_SIZE - 1)) & PAGE_MASK; _ASSERTE(end > 0); // Add module memory region if not already on the list MemoryRegion moduleRegion(regionFlags, start, end, offset); const auto& found = m_moduleMappings.find(moduleRegion); if (found == m_moduleMappings.end()) { if (g_diagnosticsVerbose) { TRACE_VERBOSE("VisitSegment: "); moduleRegion.Trace(); } // Add this module segment to the module mappings list m_moduleMappings.insert(moduleRegion); } else { TRACE("VisitSegment: WARNING: "); moduleRegion.Trace(); TRACE(" is overlapping: "); found->Trace(); } } } } void CrashInfo::VisitSection(MachOModule& module, const section_64& section) { // Add the unwind and eh frame info to the dump if ((strcmp(section.sectname, "__unwind_info") == 0) || (strcmp(section.sectname, "__eh_frame") == 0)) { InsertMemoryRegion(section.addr + module.LoadBias(), section.size); } } // // Get the memory region flags for a start address // uint32_t CrashInfo::GetMemoryRegionFlags(uint64_t start) { MemoryRegion search(0, start, start + PAGE_SIZE); const MemoryRegion* region = SearchMemoryRegions(m_allMemoryRegions, search); if (region != nullptr) { return region->Flags(); } TRACE("GetMemoryRegionFlags: %016llx FAILED\n", start); return PF_R | PF_W | PF_X; } // // Read raw memory // bool CrashInfo::ReadProcessMemory(void* address, void* buffer, size_t size, size_t* read) { assert(buffer != nullptr); assert(read != nullptr); // vm_read_overwrite usually requires that the address be page-aligned // and the size be a multiple of the page size. We can't differentiate // between the cases in which that's required and those in which it // isn't, so we do it all the time. 
vm_address_t addressAligned = (vm_address_t)address & ~(PAGE_SIZE - 1); ssize_t offset = (ssize_t)address & (PAGE_SIZE - 1); char *data = (char*)alloca(PAGE_SIZE); ssize_t numberOfBytesRead = 0; ssize_t bytesLeft = size; while (bytesLeft > 0) { vm_size_t bytesRead = PAGE_SIZE; kern_return_t result = ::vm_read_overwrite(Task(), addressAligned, PAGE_SIZE, (vm_address_t)data, &bytesRead); if (result != KERN_SUCCESS || bytesRead != PAGE_SIZE) { TRACE_VERBOSE("ReadProcessMemory(%p %d): vm_read_overwrite failed bytesLeft %d bytesRead %d from %p: %x %s\n", address, size, bytesLeft, bytesRead, (void*)addressAligned, result, mach_error_string(result)); break; } ssize_t bytesToCopy = PAGE_SIZE - offset; if (bytesToCopy > bytesLeft) { bytesToCopy = bytesLeft; } memcpy((LPSTR)buffer + numberOfBytesRead, data + offset, bytesToCopy); addressAligned = addressAligned + PAGE_SIZE; numberOfBytesRead += bytesToCopy; bytesLeft -= bytesToCopy; offset = 0; } *read = numberOfBytesRead; return size == 0 || numberOfBytesRead > 0; } const struct dyld_all_image_infos* g_image_infos = nullptr; void ModuleInfo::LoadModule() { if (m_module == nullptr) { m_module = dlopen(m_moduleName.c_str(), RTLD_LAZY); if (m_module != nullptr) { if (g_image_infos == nullptr) { struct task_dyld_info dyld_info; mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT; kern_return_t result = task_info(mach_task_self_, TASK_DYLD_INFO, (task_info_t)&dyld_info, &count); if (result == KERN_SUCCESS) { g_image_infos = (const struct dyld_all_image_infos*)dyld_info.all_image_info_addr; } } if (g_image_infos != nullptr) { for (int i = 0; i < g_image_infos->infoArrayCount; ++i) { const struct dyld_image_info* image = g_image_infos->infoArray + i; if (strcasecmp(image->imageFilePath, m_moduleName.c_str()) == 0) { m_localBaseAddress = (uint64_t)image->imageLoadAddress; break; } } } } } }
-1
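The ReadProcessMemory routine in the createdump record above reads remote memory one page at a time: it rounds the start address down to a page boundary, tracks the intra-page offset separately, and tolerates partial reads. Below is a minimal, self-contained sketch of that chunked-copy loop. read_page() is a hypothetical stand-in for vm_read_overwrite; here it simply copies from the current process so the sketch runs, whereas the real primitive reads another Mach task.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr size_t kPageSize = 4096;

// Hypothetical stand-in for vm_read_overwrite: copies one page from our own
// address space so the sketch is runnable; the real call targets another task.
static bool read_page(uintptr_t pageAddress, char* out)
{
    memcpy(out, reinterpret_cast<const void*>(pageAddress), kPageSize);
    return true;
}

// Same shape as the createdump loop: only the first iteration uses a nonzero
// intra-page offset; every later iteration starts at a page boundary.
static size_t ReadChunked(uintptr_t address, void* buffer, size_t size)
{
    uintptr_t aligned = address & ~(uintptr_t)(kPageSize - 1); // round down to page
    size_t offset = address & (kPageSize - 1);                 // start within page
    size_t copied = 0;
    char page[kPageSize];

    while (copied < size)
    {
        if (!read_page(aligned, page))
            break;                          // partial reads are tolerated

        size_t toCopy = kPageSize - offset;
        if (toCopy > size - copied)
            toCopy = size - copied;

        memcpy(static_cast<char*>(buffer) + copied, page + offset, toCopy);

        aligned += kPageSize;
        copied  += toCopy;
        offset   = 0;
    }
    return copied;
}

int main()
{
    char src[] = "hello, createdump";
    char dst[sizeof(src)] = {};
    size_t n = ReadChunked(reinterpret_cast<uintptr_t>(src), dst, sizeof(src));
    printf("%zu bytes: %s\n", n, dst);
    return 0;
}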
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/vm/instmethhash.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: instmethhash.cpp // // // // ============================================================================ #include "common.h" #include "excep.h" #include "instmethhash.h" #include "eeconfig.h" #include "generics.h" #include "typestring.h" #include "dacenumerablehash.inl" PTR_MethodDesc InstMethodHashEntry::GetMethod() { LIMITED_METHOD_DAC_CONTRACT; return dac_cast<PTR_MethodDesc>(dac_cast<TADDR>(data) & ~0x3); } DWORD InstMethodHashEntry::GetFlags() { LIMITED_METHOD_DAC_CONTRACT; return (DWORD)(dac_cast<TADDR>(data) & 0x3); } #ifndef DACCESS_COMPILE void InstMethodHashEntry::SetMethodAndFlags(MethodDesc *pMethod, DWORD dwFlags) { LIMITED_METHOD_CONTRACT; _ASSERTE(dwFlags <= 0x3); _ASSERTE(((TADDR)pMethod & 0x3) == 0); data = (MethodDesc*)((TADDR)pMethod | dwFlags); } // ============================================================================ // Instantiated method hash table methods // ============================================================================ /* static */ InstMethodHashTable *InstMethodHashTable::Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker) { CONTRACTL { THROWS; GC_NOTRIGGER; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END LoaderHeap *pHeap = pAllocator->GetLowFrequencyHeap(); InstMethodHashTable *pThis = (InstMethodHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(InstMethodHashTable))); new (pThis) InstMethodHashTable(pModule, pHeap, dwNumBuckets); #ifdef _DEBUG pThis->InitUnseal(); #endif pThis->m_pLoaderAllocator = pAllocator; return pThis; } PTR_LoaderAllocator InstMethodHashTable::GetLoaderAllocator() { WRAPPER_NO_CONTRACT; if (m_pLoaderAllocator) { return m_pLoaderAllocator; } else { _ASSERTE(m_pModule != NULL); return GetModule()->GetLoaderAllocator(); } } // Calculate a hash value for a method-desc key static DWORD Hash(TypeHandle declaringType, mdMethodDef token, Instantiation inst) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; DWORD dwHash = 0x87654321; #define INST_HASH_ADD(_value) dwHash = ((dwHash << 5) + dwHash) ^ (_value) INST_HASH_ADD(declaringType.GetCl()); INST_HASH_ADD(token); for (DWORD i = 0; i < inst.GetNumArgs(); i++) { TypeHandle thArg = inst[i]; if (thArg.GetMethodTable()) { INST_HASH_ADD(thArg.GetCl()); Instantiation sArgInst = thArg.GetInstantiation(); for (DWORD j = 0; j < sArgInst.GetNumArgs(); j++) { TypeHandle thSubArg = sArgInst[j]; if (thSubArg.GetMethodTable()) INST_HASH_ADD(thSubArg.GetCl()); else INST_HASH_ADD(thSubArg.GetSignatureCorElementType()); } } else INST_HASH_ADD(thArg.GetSignatureCorElementType()); } return dwHash; } MethodDesc* InstMethodHashTable::FindMethodDesc(TypeHandle declaringType, mdMethodDef token, BOOL unboxingStub, Instantiation inst, BOOL getSharedNotStub) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; PRECONDITION(CheckPointer(declaringType)); } CONTRACTL_END // We temporarily disable IBC logging here // because the pMD that we search through may not be restored // and ComputePreferredZapModule will assert on finding an // encode fixup pointer // IBCLoggingDisabler disableIbcLogging; MethodDesc *pMDResult = NULL; DWORD dwHash = Hash(declaringType, token, inst); InstMethodHashEntry_t* pSearch; LookupContext sContext; for (pSearch = BaseFindFirstEntryByHash(dwHash, &sContext); pSearch != NULL; pSearch = BaseFindNextEntryByHash(&sContext)) { 
MethodDesc *pMD = pSearch->GetMethod(); if (pMD->GetMemberDef() != token) continue; // Next iteration of the for loop if (pMD->GetNumGenericMethodArgs() != inst.GetNumArgs()) continue; // Next iteration of the for loop DWORD dwKeyFlags = pSearch->GetFlags(); if ( ((dwKeyFlags & InstMethodHashEntry::RequiresInstArg) == 0) != (getSharedNotStub == 0) ) continue; if ( ((dwKeyFlags & InstMethodHashEntry::UnboxingStub) == 0) != (unboxingStub == 0) ) continue; if (TypeHandle(pMD->GetMethodTable()) != declaringType) { continue; // Next iteration of the for loop } if (!inst.IsEmpty()) { Instantiation candidateInst = pMD->GetMethodInstantiation(); // We have matched the method already, thus the number of arguments in the instantiation should match too. _ASSERTE(inst.GetNumArgs() == candidateInst.GetNumArgs()); bool match = true; // This is true when all instantiation arguments match for (DWORD i = 0; i < inst.GetNumArgs(); i++) { if (candidateInst[i] != inst[i]) { match = false; break; } } if (!match) continue; // Next iteration of the pSearch for loop; } // // Success, we found a pMD that matches pMDResult = pMD; break; // Exit the for loop and jump to the return pMDResult } return pMDResult; } BOOL InstMethodHashTable::ContainsMethodDesc(MethodDesc* pMD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; return FindMethodDesc( pMD->GetMethodTable(), pMD->GetMemberDef(), pMD->IsUnboxingStub(), pMD->GetMethodInstantiation(), pMD->RequiresInstArg()) != NULL; } #endif // #ifndef DACCESS_COMPILE void InstMethodHashTable::Iterator::Reset() { WRAPPER_NO_CONTRACT; if (m_pTable) { #ifdef _DEBUG m_pTable->Unseal(); #endif m_pTable = NULL; } Init(); } void InstMethodHashTable::Iterator::Init() { WRAPPER_NO_CONTRACT; #ifdef _DEBUG if (m_pTable) m_pTable->Seal(); // The table cannot be changing while it is being iterated #endif m_fIterating = false; } InstMethodHashTable::Iterator::Iterator() { WRAPPER_NO_CONTRACT; m_pTable = NULL; Init(); } InstMethodHashTable::Iterator::Iterator(InstMethodHashTable * pTable) { WRAPPER_NO_CONTRACT; m_pTable = pTable; Init(); } InstMethodHashTable::Iterator::~Iterator() { WRAPPER_NO_CONTRACT; #ifdef _DEBUG if (m_pTable) m_pTable->Unseal(); // Done with the iterator so we unseal #endif } BOOL InstMethodHashTable::FindNext(Iterator *it, InstMethodHashEntry **ppEntry) { LIMITED_METHOD_CONTRACT; if (!it->m_fIterating) { BaseInitIterator(&it->m_sIterator); it->m_fIterating = true; } *ppEntry = it->m_sIterator.Next(); return *ppEntry ? TRUE : FALSE; } DWORD InstMethodHashTable::GetCount() { LIMITED_METHOD_CONTRACT; return BaseGetElementCount(); } #ifndef DACCESS_COMPILE // Add method desc to the hash table; must not be present already void InstMethodHashTable::InsertMethodDesc(MethodDesc *pMD) { CONTRACTL { THROWS; GC_NOTRIGGER; INJECT_FAULT(COMPlusThrowOM();); PRECONDITION(IsUnsealed()); // If we are sealed then we should not be adding to this hashtable PRECONDITION(CheckPointer(pMD)); // Generic method definitions (e.g. 
D.m<U> or C<int>.m<U>) belong in method tables, not here PRECONDITION(!pMD->IsGenericMethodDefinition()); } CONTRACTL_END InstMethodHashEntry_t * pNewEntry = (InstMethodHashEntry_t*)BaseAllocateEntry(NULL); DWORD dwKeyFlags = 0; if (pMD->RequiresInstArg()) dwKeyFlags |= InstMethodHashEntry::RequiresInstArg; if (pMD->IsUnboxingStub()) dwKeyFlags |= InstMethodHashEntry::UnboxingStub; pNewEntry->SetMethodAndFlags(pMD, dwKeyFlags); DWORD dwHash = Hash(pMD->GetMethodTable(), pMD->GetMemberDef(), pMD->GetMethodInstantiation()); BaseInsertEntry(dwHash, pNewEntry); } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void InstMethodHashTable::EnumMemoryRegionsForEntry(InstMethodHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; if (pEntry->GetMethod().IsValid()) pEntry->GetMethod()->EnumMemoryRegions(flags); } #endif // #ifdef DACCESS_COMPILE
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // File: instmethhash.cpp // // // // ============================================================================ #include "common.h" #include "excep.h" #include "instmethhash.h" #include "eeconfig.h" #include "generics.h" #include "typestring.h" #include "dacenumerablehash.inl" PTR_MethodDesc InstMethodHashEntry::GetMethod() { LIMITED_METHOD_DAC_CONTRACT; return dac_cast<PTR_MethodDesc>(dac_cast<TADDR>(data) & ~0x3); } DWORD InstMethodHashEntry::GetFlags() { LIMITED_METHOD_DAC_CONTRACT; return (DWORD)(dac_cast<TADDR>(data) & 0x3); } #ifndef DACCESS_COMPILE void InstMethodHashEntry::SetMethodAndFlags(MethodDesc *pMethod, DWORD dwFlags) { LIMITED_METHOD_CONTRACT; _ASSERTE(dwFlags <= 0x3); _ASSERTE(((TADDR)pMethod & 0x3) == 0); data = (MethodDesc*)((TADDR)pMethod | dwFlags); } // ============================================================================ // Instantiated method hash table methods // ============================================================================ /* static */ InstMethodHashTable *InstMethodHashTable::Create(LoaderAllocator *pAllocator, Module *pModule, DWORD dwNumBuckets, AllocMemTracker *pamTracker) { CONTRACTL { THROWS; GC_NOTRIGGER; INJECT_FAULT(COMPlusThrowOM();); } CONTRACTL_END LoaderHeap *pHeap = pAllocator->GetLowFrequencyHeap(); InstMethodHashTable *pThis = (InstMethodHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(InstMethodHashTable))); new (pThis) InstMethodHashTable(pModule, pHeap, dwNumBuckets); #ifdef _DEBUG pThis->InitUnseal(); #endif pThis->m_pLoaderAllocator = pAllocator; return pThis; } PTR_LoaderAllocator InstMethodHashTable::GetLoaderAllocator() { WRAPPER_NO_CONTRACT; if (m_pLoaderAllocator) { return m_pLoaderAllocator; } else { _ASSERTE(m_pModule != NULL); return GetModule()->GetLoaderAllocator(); } } // Calculate a hash value for a method-desc key static DWORD Hash(TypeHandle declaringType, mdMethodDef token, Instantiation inst) { STATIC_CONTRACT_NOTHROW; STATIC_CONTRACT_GC_NOTRIGGER; STATIC_CONTRACT_FORBID_FAULT; DWORD dwHash = 0x87654321; #define INST_HASH_ADD(_value) dwHash = ((dwHash << 5) + dwHash) ^ (_value) INST_HASH_ADD(declaringType.GetCl()); INST_HASH_ADD(token); for (DWORD i = 0; i < inst.GetNumArgs(); i++) { TypeHandle thArg = inst[i]; if (thArg.GetMethodTable()) { INST_HASH_ADD(thArg.GetCl()); Instantiation sArgInst = thArg.GetInstantiation(); for (DWORD j = 0; j < sArgInst.GetNumArgs(); j++) { TypeHandle thSubArg = sArgInst[j]; if (thSubArg.GetMethodTable()) INST_HASH_ADD(thSubArg.GetCl()); else INST_HASH_ADD(thSubArg.GetSignatureCorElementType()); } } else INST_HASH_ADD(thArg.GetSignatureCorElementType()); } return dwHash; } MethodDesc* InstMethodHashTable::FindMethodDesc(TypeHandle declaringType, mdMethodDef token, BOOL unboxingStub, Instantiation inst, BOOL getSharedNotStub) { CONTRACTL { NOTHROW; GC_NOTRIGGER; FORBID_FAULT; PRECONDITION(CheckPointer(declaringType)); } CONTRACTL_END // We temporarily disable IBC logging here // because the pMD that we search through may not be restored // and ComputePreferredZapModule will assert on finding an // encode fixup pointer // IBCLoggingDisabler disableIbcLogging; MethodDesc *pMDResult = NULL; DWORD dwHash = Hash(declaringType, token, inst); InstMethodHashEntry_t* pSearch; LookupContext sContext; for (pSearch = BaseFindFirstEntryByHash(dwHash, &sContext); pSearch != NULL; pSearch = BaseFindNextEntryByHash(&sContext)) { 
MethodDesc *pMD = pSearch->GetMethod(); if (pMD->GetMemberDef() != token) continue; // Next iteration of the for loop if (pMD->GetNumGenericMethodArgs() != inst.GetNumArgs()) continue; // Next iteration of the for loop DWORD dwKeyFlags = pSearch->GetFlags(); if ( ((dwKeyFlags & InstMethodHashEntry::RequiresInstArg) == 0) != (getSharedNotStub == 0) ) continue; if ( ((dwKeyFlags & InstMethodHashEntry::UnboxingStub) == 0) != (unboxingStub == 0) ) continue; if (TypeHandle(pMD->GetMethodTable()) != declaringType) { continue; // Next iteration of the for loop } if (!inst.IsEmpty()) { Instantiation candidateInst = pMD->GetMethodInstantiation(); // We have matched the method already, thus the number of arguments in the instantiation should match too. _ASSERTE(inst.GetNumArgs() == candidateInst.GetNumArgs()); bool match = true; // This is true when all instantiation arguments match for (DWORD i = 0; i < inst.GetNumArgs(); i++) { if (candidateInst[i] != inst[i]) { match = false; break; } } if (!match) continue; // Next iteration of the pSearch for loop; } // // Success, we found a pMD that matches pMDResult = pMD; break; // Exit the for loop and jump to the return pMDResult } return pMDResult; } BOOL InstMethodHashTable::ContainsMethodDesc(MethodDesc* pMD) { CONTRACTL { NOTHROW; GC_NOTRIGGER; MODE_ANY; } CONTRACTL_END; return FindMethodDesc( pMD->GetMethodTable(), pMD->GetMemberDef(), pMD->IsUnboxingStub(), pMD->GetMethodInstantiation(), pMD->RequiresInstArg()) != NULL; } #endif // #ifndef DACCESS_COMPILE void InstMethodHashTable::Iterator::Reset() { WRAPPER_NO_CONTRACT; if (m_pTable) { #ifdef _DEBUG m_pTable->Unseal(); #endif m_pTable = NULL; } Init(); } void InstMethodHashTable::Iterator::Init() { WRAPPER_NO_CONTRACT; #ifdef _DEBUG if (m_pTable) m_pTable->Seal(); // The table cannot be changing while it is being iterated #endif m_fIterating = false; } InstMethodHashTable::Iterator::Iterator() { WRAPPER_NO_CONTRACT; m_pTable = NULL; Init(); } InstMethodHashTable::Iterator::Iterator(InstMethodHashTable * pTable) { WRAPPER_NO_CONTRACT; m_pTable = pTable; Init(); } InstMethodHashTable::Iterator::~Iterator() { WRAPPER_NO_CONTRACT; #ifdef _DEBUG if (m_pTable) m_pTable->Unseal(); // Done with the iterator so we unseal #endif } BOOL InstMethodHashTable::FindNext(Iterator *it, InstMethodHashEntry **ppEntry) { LIMITED_METHOD_CONTRACT; if (!it->m_fIterating) { BaseInitIterator(&it->m_sIterator); it->m_fIterating = true; } *ppEntry = it->m_sIterator.Next(); return *ppEntry ? TRUE : FALSE; } DWORD InstMethodHashTable::GetCount() { LIMITED_METHOD_CONTRACT; return BaseGetElementCount(); } #ifndef DACCESS_COMPILE // Add method desc to the hash table; must not be present already void InstMethodHashTable::InsertMethodDesc(MethodDesc *pMD) { CONTRACTL { THROWS; GC_NOTRIGGER; INJECT_FAULT(COMPlusThrowOM();); PRECONDITION(IsUnsealed()); // If we are sealed then we should not be adding to this hashtable PRECONDITION(CheckPointer(pMD)); // Generic method definitions (e.g. 
D.m<U> or C<int>.m<U>) belong in method tables, not here PRECONDITION(!pMD->IsGenericMethodDefinition()); } CONTRACTL_END InstMethodHashEntry_t * pNewEntry = (InstMethodHashEntry_t*)BaseAllocateEntry(NULL); DWORD dwKeyFlags = 0; if (pMD->RequiresInstArg()) dwKeyFlags |= InstMethodHashEntry::RequiresInstArg; if (pMD->IsUnboxingStub()) dwKeyFlags |= InstMethodHashEntry::UnboxingStub; pNewEntry->SetMethodAndFlags(pMD, dwKeyFlags); DWORD dwHash = Hash(pMD->GetMethodTable(), pMD->GetMemberDef(), pMD->GetMethodInstantiation()); BaseInsertEntry(dwHash, pNewEntry); } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE void InstMethodHashTable::EnumMemoryRegionsForEntry(InstMethodHashEntry_t *pEntry, CLRDataEnumMemoryFlags flags) { SUPPORTS_DAC; if (pEntry->GetMethod().IsValid()) pEntry->GetMethod()->EnumMemoryRegions(flags); } #endif // #ifdef DACCESS_COMPILE
-1
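The PR description repeated through these records lists three conditions under which the ARM32 JIT must avoid a fast tail call. The first, a split struct argument, is specific to the 32-bit AAPCS: a by-value struct may travel partly in core registers r0-r3 and partly on the stack. The fragment below is a hypothetical illustration of that shape, not code from the PR itself.

// Hypothetical ARM32 (AAPCS) example of a split struct argument. With x in
// r0, the first 12 bytes of s are passed in r1-r3 and the remaining 12 bytes
// land in the caller's outgoing argument area on the stack.
struct Big { int a, b, c, d, e, f; };   // 24 bytes

int Callee(int x, Big s)
{
    return x + s.a + s.f;
}

int Caller(int x, Big s)
{
    // A fast tail call would have to write the stack-resident tail of s into
    // the same frame that still holds Caller's own incoming arguments, so per
    // the PR description the JIT declines the fast path for this call shape.
    return Callee(x, s);
}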
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/vm/domainassembly.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // DomainAssembly.h // // -------------------------------------------------------------------------------- #ifndef _DOMAINASSEMBLY_H_ #define _DOMAINASSEMBLY_H_ // -------------------------------------------------------------------------------- // Required headers // -------------------------------------------------------------------------------- // -------------------------------------------------------------------------------- // Forward class declarations // -------------------------------------------------------------------------------- class AppDomain; class DomainAssembly; class Assembly; class Module; class DynamicMethodTable; enum FileLoadLevel { // These states are tracked by FileLoadLock // Note: This enum must match the static array fileLoadLevelName[] // which contains the printable names of the enum values // Note that semantics here are description is the LAST step done, not what is // currently being done. FILE_LOAD_CREATE, FILE_LOAD_BEGIN, FILE_LOAD_FIND_NATIVE_IMAGE, FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES, FILE_LOAD_ALLOCATE, FILE_LOAD_ADD_DEPENDENCIES, FILE_LOAD_PRE_LOADLIBRARY, FILE_LOAD_LOADLIBRARY, FILE_LOAD_POST_LOADLIBRARY, FILE_LOAD_EAGER_FIXUPS, FILE_LOAD_DELIVER_EVENTS, FILE_LOAD_VTABLE_FIXUPS, FILE_LOADED, // Loaded by not yet active FILE_ACTIVE // Fully active (constructors run & security checked) }; enum NotificationStatus { NOT_NOTIFIED=0, PROFILER_NOTIFIED=1, DEBUGGER_NEEDNOTIFICATION=2, DEBUGGER_NOTIFIED=4 }; // -------------------------------------------------------------------------------- // DomainAssembly represents an assembly loaded (or being loaded) into an app domain. It // is guranteed to be unique per file per app domain. // -------------------------------------------------------------------------------- class DomainAssembly final { public: // ------------------------------------------------------------ // Public API // ------------------------------------------------------------ #ifndef DACCESS_COMPILE ~DomainAssembly(); DomainAssembly() {LIMITED_METHOD_CONTRACT;}; #endif PTR_AppDomain GetAppDomain() { LIMITED_METHOD_CONTRACT; SUPPORTS_DAC; return m_pDomain; } PEAssembly *GetPEAssembly() { LIMITED_METHOD_DAC_CONTRACT; return PTR_PEAssembly(m_pPEAssembly); } Assembly* GetAssembly() { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(CheckLoaded()); return m_pAssembly; } Module* GetModule() { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(CheckLoaded()); return m_pModule; } IMDInternalImport *GetMDImport() { WRAPPER_NO_CONTRACT; return m_pPEAssembly->GetMDImport(); } OBJECTREF GetExposedAssemblyObjectIfExists() { LIMITED_METHOD_CONTRACT; OBJECTREF objRet = NULL; GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedAssemblyObject, &objRet); return objRet; } // Returns managed representation of the assembly (Assembly or AssemblyBuilder). // Returns NULL if the managed scout was already collected (see code:LoaderAllocator#AssemblyPhases). 
OBJECTREF GetExposedAssemblyObject(); OBJECTREF GetExposedModuleObjectIfExists() { LIMITED_METHOD_CONTRACT; OBJECTREF objRet = NULL; GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedModuleObject, &objRet); return objRet; } OBJECTREF GetExposedModuleObject(); BOOL IsSystem() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->IsSystem(); } LPCUTF8 GetSimpleName() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->GetSimpleName(); } #ifdef LOGGING LPCWSTR GetDebugName() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->GetDebugName(); } #endif BOOL IsCollectible() { LIMITED_METHOD_CONTRACT; return m_fCollectible; } ULONG HashIdentity() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->HashIdentity(); } // ------------------------------------------------------------ // Loading state checks // ------------------------------------------------------------ // Return the File's load level. Note that this is the last level actually successfully completed. // Note that this is subtly different than the FileLoadLock's level, which is the last level // which was triggered (but potentially skipped if error or inappropriate.) FileLoadLevel GetLoadLevel() { LIMITED_METHOD_DAC_CONTRACT; return m_level; } // Error means that a permanent x-appdomain load error has occurred. BOOL IsError() { LIMITED_METHOD_DAC_CONTRACT; DACCOP_IGNORE(FieldAccess, "No marshalling required"); return m_pError != NULL; } // Loading means that the load is still being tracked by a FileLoadLock. BOOL IsLoading() { LIMITED_METHOD_CONTRACT; return m_loading; } // Loaded means that the file can be used passively. This includes loading types, reflection, and // jitting. BOOL IsLoaded() { LIMITED_METHOD_DAC_CONTRACT; return m_level >= FILE_LOAD_DELIVER_EVENTS; } // Active means that the file can be used actively in the current app domain. Note that a shared file // may conditionally not be able to be made active on a per app domain basis. BOOL IsActive() { LIMITED_METHOD_CONTRACT; return m_level >= FILE_ACTIVE; } // Checks if the load has reached the point where profilers may be notified // about the file. It's important that IF a profiler is notified, THEN this returns // TRUE, otherwise there can be profiler-attach races where the profiler doesn't see // the file via either enumeration or notification. As a result, this begins // returning TRUE just before the profiler is actually notified. See // code:ProfilerFunctionEnum::Init#ProfilerEnumAssemblies BOOL IsAvailableToProfilers() { LIMITED_METHOD_DAC_CONTRACT; return IsProfilerNotified(); // despite the name, this function returns TRUE just before we notify the profiler } // CheckLoaded is appropriate for asserts that the assembly can be passively used. CHECK CheckLoaded(); // CheckActivated is appropriate for asserts that the assembly can be actively used. Note that // it is slightly different from IsActive in that it deals with reentrancy cases properly. CHECK CheckActivated(); // Ensure that an assembly has reached at least the IsLoaded state. Throw if not. void EnsureLoaded() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_LOADED); } // Ensure that an assembly has reached at least the IsActive state. Throw if not. void EnsureActive() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_ACTIVE); } // Ensure that an assembly has reached at least the Allocated state. Throw if not. 
void EnsureAllocated() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_LOAD_ALLOCATE); } void EnsureLibraryLoaded() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_LOAD_LOADLIBRARY); } // EnsureLoadLevel is a generic routine used to ensure that the file is not in a delay loaded // state (unless it needs to be.) This should be used when a particular level of loading // is required for an operation. Note that deadlocks are tolerated so the level may be one void EnsureLoadLevel(FileLoadLevel targetLevel) DAC_EMPTY(); // CheckLoadLevel is an assert predicate used to verify the load level of an assembly. // deadlockOK indicates that the level is allowed to be one short if we are restricted // by loader reentrancy. CHECK CheckLoadLevel(FileLoadLevel requiredLevel, BOOL deadlockOK = TRUE) DAC_EMPTY_RET(CHECK::OK()); // RequireLoadLevel throws an exception if the domain file isn't loaded enough. Note // that this is intolerant of deadlock related failures so is only really appropriate for // checks inside the main loading loop. void RequireLoadLevel(FileLoadLevel targetLevel) DAC_EMPTY(); // Throws if a load error has occurred void ThrowIfError(FileLoadLevel targetLevel) DAC_EMPTY(); // Checks that a load error has not occurred before the given level CHECK CheckNoError(FileLoadLevel targetLevel) DAC_EMPTY_RET(CHECK::OK()); // IsNotified means that the profiler API notification has been delivered BOOL IsProfilerNotified() { LIMITED_METHOD_CONTRACT; return m_notifyflags & PROFILER_NOTIFIED; } BOOL IsDebuggerNotified() { LIMITED_METHOD_CONTRACT; return m_notifyflags & DEBUGGER_NOTIFIED; } BOOL ShouldNotifyDebugger() { LIMITED_METHOD_CONTRACT; return m_notifyflags & DEBUGGER_NEEDNOTIFICATION; } // ------------------------------------------------------------ // Other public APIs // ------------------------------------------------------------ #ifndef DACCESS_COMPILE BOOL Equals(DomainAssembly *pAssembly) { WRAPPER_NO_CONTRACT; return GetPEAssembly()->Equals(pAssembly->GetPEAssembly()); } BOOL Equals(PEAssembly *pPEAssembly) { WRAPPER_NO_CONTRACT; return GetPEAssembly()->Equals(pPEAssembly); } #endif // DACCESS_COMPILE #ifdef DACCESS_COMPILE void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif #ifndef DACCESS_COMPILE // light code gen. 
Keep the list of MethodTables needed for creating dynamic methods DynamicMethodTable* GetDynamicMethodTable(); #endif DomainAssembly* GetNextDomainAssemblyInSameALC() { return m_NextDomainAssemblyInSameALC; } void SetNextDomainAssemblyInSameALC(DomainAssembly* domainAssembly) { _ASSERTE(m_NextDomainAssemblyInSameALC == NULL); m_NextDomainAssemblyInSameALC = domainAssembly; } LoaderAllocator* GetLoaderAllocator() { LIMITED_METHOD_CONTRACT; return m_pLoaderAllocator; } // ------------------------------------------------------------ // Resource access // ------------------------------------------------------------ BOOL GetResource(LPCSTR szName, DWORD* cbResource, PBYTE* pbInMemoryResource, DomainAssembly** pAssemblyRef, LPCSTR* szFileName, DWORD* dwLocation, BOOL fSkipRaiseResolveEvent); private: // ------------------------------------------------------------ // Loader API // ------------------------------------------------------------ friend class AppDomain; friend class Assembly; friend class Module; friend class FileLoadLock; DomainAssembly(AppDomain* pDomain, PEAssembly* pPEAssembly, LoaderAllocator* pLoaderAllocator); BOOL DoIncrementalLoad(FileLoadLevel targetLevel); void ClearLoading() { LIMITED_METHOD_CONTRACT; m_loading = FALSE; } void SetLoadLevel(FileLoadLevel level) { LIMITED_METHOD_CONTRACT; m_level = level; } #ifndef DACCESS_COMPILE void Begin(); void Allocate(); void AddDependencies(); void PreLoadLibrary(); void LoadLibrary(); void PostLoadLibrary(); void EagerFixups(); void VtableFixups(); void DeliverSyncEvents(); void DeliverAsyncEvents(); void FinishLoad(); void Activate(); void RegisterWithHostAssembly(); void UnregisterFromHostAssembly(); #endif // This should be used to permanently set the load to fail. Do not use with transient conditions void SetError(Exception *ex); void SetAssembly(Assembly* pAssembly); void SetProfilerNotified() { LIMITED_METHOD_CONTRACT; m_notifyflags|= PROFILER_NOTIFIED; } void SetDebuggerNotified() { LIMITED_METHOD_CONTRACT; m_notifyflags|=DEBUGGER_NOTIFIED; } void SetShouldNotifyDebugger() { LIMITED_METHOD_CONTRACT; m_notifyflags|=DEBUGGER_NEEDNOTIFICATION; } class ExInfo { enum { ExType_ClrEx, ExType_HR } m_type; union { Exception* m_pEx; HRESULT m_hr; }; public: void Throw() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; if (m_type == ExType_ClrEx) { PAL_CPP_THROW(Exception*, m_pEx->DomainBoundClone()); } if (m_type == ExType_HR) ThrowHR(m_hr); _ASSERTE(!"Bad exception type"); ThrowHR(E_UNEXPECTED); }; ExInfo(Exception* pEx) { LIMITED_METHOD_CONTRACT; m_type = ExType_ClrEx; m_pEx = pEx; }; ~ExInfo() { LIMITED_METHOD_CONTRACT; if (m_type == ExType_ClrEx) delete m_pEx; } }; public: // ------------------------------------------------------------ // Debugger control API // ------------------------------------------------------------ DebuggerAssemblyControlFlags GetDebuggerInfoBits(void) { LIMITED_METHOD_CONTRACT; return m_debuggerFlags; } void SetDebuggerInfoBits(DebuggerAssemblyControlFlags newBits) { LIMITED_METHOD_CONTRACT; m_debuggerFlags = newBits; } void SetupDebuggingConfig(void); DWORD ComputeDebuggingConfig(void); HRESULT GetDebuggingCustomAttributes(DWORD* pdwFlags); BOOL IsVisibleToDebugger(); BOOL NotifyDebuggerLoad(int flags, BOOL attaching); void NotifyDebuggerUnload(); private: // ------------------------------------------------------------ // Instance data // ------------------------------------------------------------ PTR_Assembly m_pAssembly; PTR_AppDomain m_pDomain; PTR_PEAssembly m_pPEAssembly; PTR_Module 
m_pModule; BOOL m_fCollectible; DomainAssembly* m_NextDomainAssemblyInSameALC; PTR_LoaderAllocator m_pLoaderAllocator; FileLoadLevel m_level; BOOL m_loading; LOADERHANDLE m_hExposedModuleObject; LOADERHANDLE m_hExposedAssemblyObject; ExInfo* m_pError; BOOL m_bDisableActivationCheck; BOOL m_fHostAssemblyPublished; // m_pDynamicMethodTable is used by the light code generation to allow method // generation on the fly. They are lazily created when/if a dynamic method is requested // for this specific module DynamicMethodTable* m_pDynamicMethodTable; DebuggerAssemblyControlFlags m_debuggerFlags; DWORD m_notifyflags; BOOL m_fDebuggerUnloadStarted; }; #endif // _DOMAINASSEMBLY_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // -------------------------------------------------------------------------------- // DomainAssembly.h // // -------------------------------------------------------------------------------- #ifndef _DOMAINASSEMBLY_H_ #define _DOMAINASSEMBLY_H_ // -------------------------------------------------------------------------------- // Required headers // -------------------------------------------------------------------------------- // -------------------------------------------------------------------------------- // Forward class declarations // -------------------------------------------------------------------------------- class AppDomain; class DomainAssembly; class Assembly; class Module; class DynamicMethodTable; enum FileLoadLevel { // These states are tracked by FileLoadLock // Note: This enum must match the static array fileLoadLevelName[] // which contains the printable names of the enum values // Note that semantics here are description is the LAST step done, not what is // currently being done. FILE_LOAD_CREATE, FILE_LOAD_BEGIN, FILE_LOAD_FIND_NATIVE_IMAGE, FILE_LOAD_VERIFY_NATIVE_IMAGE_DEPENDENCIES, FILE_LOAD_ALLOCATE, FILE_LOAD_ADD_DEPENDENCIES, FILE_LOAD_PRE_LOADLIBRARY, FILE_LOAD_LOADLIBRARY, FILE_LOAD_POST_LOADLIBRARY, FILE_LOAD_EAGER_FIXUPS, FILE_LOAD_DELIVER_EVENTS, FILE_LOAD_VTABLE_FIXUPS, FILE_LOADED, // Loaded by not yet active FILE_ACTIVE // Fully active (constructors run & security checked) }; enum NotificationStatus { NOT_NOTIFIED=0, PROFILER_NOTIFIED=1, DEBUGGER_NEEDNOTIFICATION=2, DEBUGGER_NOTIFIED=4 }; // -------------------------------------------------------------------------------- // DomainAssembly represents an assembly loaded (or being loaded) into an app domain. It // is guranteed to be unique per file per app domain. // -------------------------------------------------------------------------------- class DomainAssembly final { public: // ------------------------------------------------------------ // Public API // ------------------------------------------------------------ #ifndef DACCESS_COMPILE ~DomainAssembly(); DomainAssembly() {LIMITED_METHOD_CONTRACT;}; #endif PTR_AppDomain GetAppDomain() { LIMITED_METHOD_CONTRACT; SUPPORTS_DAC; return m_pDomain; } PEAssembly *GetPEAssembly() { LIMITED_METHOD_DAC_CONTRACT; return PTR_PEAssembly(m_pPEAssembly); } Assembly* GetAssembly() { LIMITED_METHOD_DAC_CONTRACT; CONSISTENCY_CHECK(CheckLoaded()); return m_pAssembly; } Module* GetModule() { LIMITED_METHOD_CONTRACT; CONSISTENCY_CHECK(CheckLoaded()); return m_pModule; } IMDInternalImport *GetMDImport() { WRAPPER_NO_CONTRACT; return m_pPEAssembly->GetMDImport(); } OBJECTREF GetExposedAssemblyObjectIfExists() { LIMITED_METHOD_CONTRACT; OBJECTREF objRet = NULL; GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedAssemblyObject, &objRet); return objRet; } // Returns managed representation of the assembly (Assembly or AssemblyBuilder). // Returns NULL if the managed scout was already collected (see code:LoaderAllocator#AssemblyPhases). 
OBJECTREF GetExposedAssemblyObject(); OBJECTREF GetExposedModuleObjectIfExists() { LIMITED_METHOD_CONTRACT; OBJECTREF objRet = NULL; GET_LOADERHANDLE_VALUE_FAST(GetLoaderAllocator(), m_hExposedModuleObject, &objRet); return objRet; } OBJECTREF GetExposedModuleObject(); BOOL IsSystem() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->IsSystem(); } LPCUTF8 GetSimpleName() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->GetSimpleName(); } #ifdef LOGGING LPCWSTR GetDebugName() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->GetDebugName(); } #endif BOOL IsCollectible() { LIMITED_METHOD_CONTRACT; return m_fCollectible; } ULONG HashIdentity() { WRAPPER_NO_CONTRACT; return GetPEAssembly()->HashIdentity(); } // ------------------------------------------------------------ // Loading state checks // ------------------------------------------------------------ // Return the File's load level. Note that this is the last level actually successfully completed. // Note that this is subtly different than the FileLoadLock's level, which is the last level // which was triggered (but potentially skipped if error or inappropriate.) FileLoadLevel GetLoadLevel() { LIMITED_METHOD_DAC_CONTRACT; return m_level; } // Error means that a permanent x-appdomain load error has occurred. BOOL IsError() { LIMITED_METHOD_DAC_CONTRACT; DACCOP_IGNORE(FieldAccess, "No marshalling required"); return m_pError != NULL; } // Loading means that the load is still being tracked by a FileLoadLock. BOOL IsLoading() { LIMITED_METHOD_CONTRACT; return m_loading; } // Loaded means that the file can be used passively. This includes loading types, reflection, and // jitting. BOOL IsLoaded() { LIMITED_METHOD_DAC_CONTRACT; return m_level >= FILE_LOAD_DELIVER_EVENTS; } // Active means that the file can be used actively in the current app domain. Note that a shared file // may conditionally not be able to be made active on a per app domain basis. BOOL IsActive() { LIMITED_METHOD_CONTRACT; return m_level >= FILE_ACTIVE; } // Checks if the load has reached the point where profilers may be notified // about the file. It's important that IF a profiler is notified, THEN this returns // TRUE, otherwise there can be profiler-attach races where the profiler doesn't see // the file via either enumeration or notification. As a result, this begins // returning TRUE just before the profiler is actually notified. See // code:ProfilerFunctionEnum::Init#ProfilerEnumAssemblies BOOL IsAvailableToProfilers() { LIMITED_METHOD_DAC_CONTRACT; return IsProfilerNotified(); // despite the name, this function returns TRUE just before we notify the profiler } // CheckLoaded is appropriate for asserts that the assembly can be passively used. CHECK CheckLoaded(); // CheckActivated is appropriate for asserts that the assembly can be actively used. Note that // it is slightly different from IsActive in that it deals with reentrancy cases properly. CHECK CheckActivated(); // Ensure that an assembly has reached at least the IsLoaded state. Throw if not. void EnsureLoaded() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_LOADED); } // Ensure that an assembly has reached at least the IsActive state. Throw if not. void EnsureActive() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_ACTIVE); } // Ensure that an assembly has reached at least the Allocated state. Throw if not. 
void EnsureAllocated() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_LOAD_ALLOCATE); } void EnsureLibraryLoaded() { WRAPPER_NO_CONTRACT; return EnsureLoadLevel(FILE_LOAD_LOADLIBRARY); } // EnsureLoadLevel is a generic routine used to ensure that the file is not in a delay loaded // state (unless it needs to be.) This should be used when a particular level of loading // is required for an operation. Note that deadlocks are tolerated so the level may be one void EnsureLoadLevel(FileLoadLevel targetLevel) DAC_EMPTY(); // CheckLoadLevel is an assert predicate used to verify the load level of an assembly. // deadlockOK indicates that the level is allowed to be one short if we are restricted // by loader reentrancy. CHECK CheckLoadLevel(FileLoadLevel requiredLevel, BOOL deadlockOK = TRUE) DAC_EMPTY_RET(CHECK::OK()); // RequireLoadLevel throws an exception if the domain file isn't loaded enough. Note // that this is intolerant of deadlock related failures so is only really appropriate for // checks inside the main loading loop. void RequireLoadLevel(FileLoadLevel targetLevel) DAC_EMPTY(); // Throws if a load error has occurred void ThrowIfError(FileLoadLevel targetLevel) DAC_EMPTY(); // Checks that a load error has not occurred before the given level CHECK CheckNoError(FileLoadLevel targetLevel) DAC_EMPTY_RET(CHECK::OK()); // IsNotified means that the profiler API notification has been delivered BOOL IsProfilerNotified() { LIMITED_METHOD_CONTRACT; return m_notifyflags & PROFILER_NOTIFIED; } BOOL IsDebuggerNotified() { LIMITED_METHOD_CONTRACT; return m_notifyflags & DEBUGGER_NOTIFIED; } BOOL ShouldNotifyDebugger() { LIMITED_METHOD_CONTRACT; return m_notifyflags & DEBUGGER_NEEDNOTIFICATION; } // ------------------------------------------------------------ // Other public APIs // ------------------------------------------------------------ #ifndef DACCESS_COMPILE BOOL Equals(DomainAssembly *pAssembly) { WRAPPER_NO_CONTRACT; return GetPEAssembly()->Equals(pAssembly->GetPEAssembly()); } BOOL Equals(PEAssembly *pPEAssembly) { WRAPPER_NO_CONTRACT; return GetPEAssembly()->Equals(pPEAssembly); } #endif // DACCESS_COMPILE #ifdef DACCESS_COMPILE void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif #ifndef DACCESS_COMPILE // light code gen. 
Keep the list of MethodTables needed for creating dynamic methods DynamicMethodTable* GetDynamicMethodTable(); #endif DomainAssembly* GetNextDomainAssemblyInSameALC() { return m_NextDomainAssemblyInSameALC; } void SetNextDomainAssemblyInSameALC(DomainAssembly* domainAssembly) { _ASSERTE(m_NextDomainAssemblyInSameALC == NULL); m_NextDomainAssemblyInSameALC = domainAssembly; } LoaderAllocator* GetLoaderAllocator() { LIMITED_METHOD_CONTRACT; return m_pLoaderAllocator; } // ------------------------------------------------------------ // Resource access // ------------------------------------------------------------ BOOL GetResource(LPCSTR szName, DWORD* cbResource, PBYTE* pbInMemoryResource, DomainAssembly** pAssemblyRef, LPCSTR* szFileName, DWORD* dwLocation, BOOL fSkipRaiseResolveEvent); private: // ------------------------------------------------------------ // Loader API // ------------------------------------------------------------ friend class AppDomain; friend class Assembly; friend class Module; friend class FileLoadLock; DomainAssembly(AppDomain* pDomain, PEAssembly* pPEAssembly, LoaderAllocator* pLoaderAllocator); BOOL DoIncrementalLoad(FileLoadLevel targetLevel); void ClearLoading() { LIMITED_METHOD_CONTRACT; m_loading = FALSE; } void SetLoadLevel(FileLoadLevel level) { LIMITED_METHOD_CONTRACT; m_level = level; } #ifndef DACCESS_COMPILE void Begin(); void Allocate(); void AddDependencies(); void PreLoadLibrary(); void LoadLibrary(); void PostLoadLibrary(); void EagerFixups(); void VtableFixups(); void DeliverSyncEvents(); void DeliverAsyncEvents(); void FinishLoad(); void Activate(); void RegisterWithHostAssembly(); void UnregisterFromHostAssembly(); #endif // This should be used to permanently set the load to fail. Do not use with transient conditions void SetError(Exception *ex); void SetAssembly(Assembly* pAssembly); void SetProfilerNotified() { LIMITED_METHOD_CONTRACT; m_notifyflags|= PROFILER_NOTIFIED; } void SetDebuggerNotified() { LIMITED_METHOD_CONTRACT; m_notifyflags|=DEBUGGER_NOTIFIED; } void SetShouldNotifyDebugger() { LIMITED_METHOD_CONTRACT; m_notifyflags|=DEBUGGER_NEEDNOTIFICATION; } class ExInfo { enum { ExType_ClrEx, ExType_HR } m_type; union { Exception* m_pEx; HRESULT m_hr; }; public: void Throw() { CONTRACTL { THROWS; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END; if (m_type == ExType_ClrEx) { PAL_CPP_THROW(Exception*, m_pEx->DomainBoundClone()); } if (m_type == ExType_HR) ThrowHR(m_hr); _ASSERTE(!"Bad exception type"); ThrowHR(E_UNEXPECTED); }; ExInfo(Exception* pEx) { LIMITED_METHOD_CONTRACT; m_type = ExType_ClrEx; m_pEx = pEx; }; ~ExInfo() { LIMITED_METHOD_CONTRACT; if (m_type == ExType_ClrEx) delete m_pEx; } }; public: // ------------------------------------------------------------ // Debugger control API // ------------------------------------------------------------ DebuggerAssemblyControlFlags GetDebuggerInfoBits(void) { LIMITED_METHOD_CONTRACT; return m_debuggerFlags; } void SetDebuggerInfoBits(DebuggerAssemblyControlFlags newBits) { LIMITED_METHOD_CONTRACT; m_debuggerFlags = newBits; } void SetupDebuggingConfig(void); DWORD ComputeDebuggingConfig(void); HRESULT GetDebuggingCustomAttributes(DWORD* pdwFlags); BOOL IsVisibleToDebugger(); BOOL NotifyDebuggerLoad(int flags, BOOL attaching); void NotifyDebuggerUnload(); private: // ------------------------------------------------------------ // Instance data // ------------------------------------------------------------ PTR_Assembly m_pAssembly; PTR_AppDomain m_pDomain; PTR_PEAssembly m_pPEAssembly; PTR_Module 
m_pModule; BOOL m_fCollectible; DomainAssembly* m_NextDomainAssemblyInSameALC; PTR_LoaderAllocator m_pLoaderAllocator; FileLoadLevel m_level; BOOL m_loading; LOADERHANDLE m_hExposedModuleObject; LOADERHANDLE m_hExposedAssemblyObject; ExInfo* m_pError; BOOL m_bDisableActivationCheck; BOOL m_fHostAssemblyPublished; // m_pDynamicMethodTable is used by the light code generation to allow method // generation on the fly. They are lazily created when/if a dynamic method is requested // for this specific module DynamicMethodTable* m_pDynamicMethodTable; DebuggerAssemblyControlFlags m_debuggerFlags; DWORD m_notifyflags; BOOL m_fDebuggerUnloadStarted; }; #endif // _DOMAINASSEMBLY_H_
-1
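The domainassembly.h record above encodes the assembly loading pipeline as an ordered enum, which is why predicates such as IsLoaded() and IsActive() reduce to a single comparison against m_level. Here is a trimmed-down sketch of that pattern with simplified names; it is not the runtime's actual control flow.

#include <cstdio>

// Simplified stand-in for the FileLoadLevel pipeline: values are declared in
// pipeline order, so "has this stage completed?" is one ordered comparison.
enum LoadLevel
{
    LOAD_CREATE,
    LOAD_ALLOCATE,
    LOAD_DELIVER_EVENTS,
    LOAD_LOADED,
    LOAD_ACTIVE
};

struct File
{
    LoadLevel level = LOAD_CREATE;

    // Mirrors IsLoaded()/IsActive(): level records the last completed step.
    bool IsLoaded() const { return level >= LOAD_DELIVER_EVENTS; }
    bool IsActive() const { return level >= LOAD_ACTIVE; }

    // Mirrors EnsureLoadLevel(): advance one stage at a time to the target.
    void EnsureLoadLevel(LoadLevel target)
    {
        while (level < target)
            level = static_cast<LoadLevel>(level + 1); // run one pipeline stage
    }
};

int main()
{
    File f;
    f.EnsureLoadLevel(LOAD_LOADED);
    printf("loaded=%d active=%d\n", f.IsLoaded(), f.IsActive()); // loaded=1 active=0
    return 0;
}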
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/coreclr/tools/superpmi/mcs/verbdump.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //---------------------------------------------------------- // verbDump.h - verb that Dumps a MC file //---------------------------------------------------------- #ifndef _verbDump #define _verbDump class verbDump { public: static int DoWork(const char* nameofInput, int indexCount, const int* indexes, bool simple); }; #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //---------------------------------------------------------- // verbDump.h - verb that Dumps a MC file //---------------------------------------------------------- #ifndef _verbDump #define _verbDump class verbDump { public: static int DoWork(const char* nameofInput, int indexCount, const int* indexes, bool simple); }; #endif
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it would overwrite stack that will be passed to the callee.
./src/tests/Interop/StructMarshalling/ReversePInvoke/MarshalExpStruct/ExpStructAsParamNative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "ExpStructAsParamNative.h" #include <stdio.h> #include <stdlib.h> #include <xplatform.h> const char* NativeStr = "Native"; const size_t size=strlen(NativeStr); #define PRINT_ERR_INFO() \ printf("\t%s : unexpected error \n",__FUNCTION__) //----------method called byref----------// ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefINNER2_Cdecl(INNER2* inner) { if(!IsCorrectINNER2(inner)) { PRINT_ERR_INFO(); PrintINNER2(inner,"inner"); return FALSE; } ChangeINNER2(inner); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefINNER2_Stdcall(INNER2* inner) { if(!IsCorrectINNER2(inner)) { PRINT_ERR_INFO(); PrintINNER2(inner,"inner"); return FALSE; } ChangeINNER2(inner); return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefInnerExplicit_Cdecl(InnerExplicit* inner) { if(inner->f1 != 1 || memcmp(inner->f3, "some string",11*sizeof(char)) != 0) { PRINT_ERR_INFO(); PrintInnerExplicit(inner,"inner"); return FALSE; } ChangeInnerExplicit(inner); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefInnerExplicit_Stdcall(InnerExplicit* inner) { if(inner->f1 != 1 || memcmp(inner->f3, "some string",11*sizeof(char)) != 0) { PRINT_ERR_INFO(); PrintInnerExplicit(inner,"inner"); return FALSE; } ChangeInnerExplicit(inner); return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Cdecl(InnerArrayExplicit* outer2) { for(int i = 0;i<NumArrElements;i++) { if(outer2->arr[i].f1 != 1) { PRINT_ERR_INFO(); return FALSE; } } if(memcmp(outer2->f4,"some string2",12) != 0) { PRINT_ERR_INFO(); return FALSE; } for(int i =0;i<NumArrElements;i++) { outer2->arr[i].f1 = 77; } const char* temp = "change string2"; size_t len = strlen(temp); LPCSTR str = (LPCSTR)CoreClrAlloc( sizeof(char)*(len+1) ); strcpy_s((char*)str,len+1,temp); outer2->f4 = str; return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Stdcall(InnerArrayExplicit* outer2) { for(int i = 0;i<NumArrElements;i++) { if(outer2->arr[i].f1 != 1) { PRINT_ERR_INFO(); return FALSE; } } if(memcmp(outer2->f4,"some string2",12) != 0) { PRINT_ERR_INFO(); return FALSE; } for(int i =0;i<NumArrElements;i++) { outer2->arr[i].f1 = 77; } const char* temp = "change string2"; size_t len = strlen(temp); LPCSTR str = (LPCSTR)CoreClrAlloc( sizeof(char)*(len+1) ); strcpy_s((char*)str,len+1,temp); outer2->f4 = str; return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefOUTER3_Cdecl(OUTER3* outer3) { if(!IsCorrectOUTER3(outer3)) { PRINT_ERR_INFO(); PrintOUTER3(outer3,"OUTER3"); return FALSE; } ChangeOUTER3(outer3); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefOUTER3_Stdcall(OUTER3* outer3) { if(!IsCorrectOUTER3(outer3)) { PRINT_ERR_INFO(); PrintOUTER3(outer3,"OUTER3"); return FALSE; } ChangeOUTER3(outer3); return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefU_Cdecl(U* str1) { if(!IsCorrectU(str1)) { PRINT_ERR_INFO(); PrintU(str1, "str1"); return FALSE; } ChangeU(str1); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefU_Stdcall(U* str1) { if(!IsCorrectU(str1)) { PRINT_ERR_INFO(); PrintU(str1, "str1"); return FALSE; } ChangeU(str1); return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl 
MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Cdecl(ByteStructPack2Explicit* str1) { if(!IsCorrectByteStructPack2Explicit(str1)) { PRINT_ERR_INFO(); PrintByteStructPack2Explicit(str1, "str1"); return FALSE; } ChangeByteStructPack2Explicit(str1); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Stdcall(ByteStructPack2Explicit* str1) { if(!IsCorrectByteStructPack2Explicit(str1)) { PRINT_ERR_INFO(); PrintByteStructPack2Explicit(str1, "str1"); return FALSE; } ChangeByteStructPack2Explicit(str1); return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Cdecl(ShortStructPack4Explicit* str1) { if(!IsCorrectShortStructPack4Explicit(str1)) { PRINT_ERR_INFO(); PrintShortStructPack4Explicit(str1, "str1"); return FALSE; } ChangeShortStructPack4Explicit(str1); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Stdcall(ShortStructPack4Explicit* str1) { if(!IsCorrectShortStructPack4Explicit(str1)) { PRINT_ERR_INFO(); PrintShortStructPack4Explicit(str1, "str1"); return FALSE; } ChangeShortStructPack4Explicit(str1); return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Cdecl(IntStructPack8Explicit* str1) { if(!IsCorrectIntStructPack8Explicit(str1)) { PRINT_ERR_INFO(); PrintIntStructPack8Explicit(str1, "str1"); return FALSE; } ChangeIntStructPack8Explicit(str1); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Stdcall(IntStructPack8Explicit* str1) { if(!IsCorrectIntStructPack8Explicit(str1)) { PRINT_ERR_INFO(); PrintIntStructPack8Explicit(str1, "str1"); return FALSE; } ChangeIntStructPack8Explicit(str1); return TRUE; } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Cdecl(LongStructPack16Explicit* str1) { if(!IsCorrectLongStructPack16Explicit(str1)) { PRINT_ERR_INFO(); PrintLongStructPack16Explicit(str1, "str1"); return FALSE; } ChangeLongStructPack16Explicit(str1); return TRUE; } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Stdcall(LongStructPack16Explicit* str1) { if(!IsCorrectLongStructPack16Explicit(str1)) { PRINT_ERR_INFO(); PrintLongStructPack16Explicit(str1, "str1"); return FALSE; } ChangeLongStructPack16Explicit(str1); return TRUE; } ///// //---------------------------- ----------// //----------method called byval----------// ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValINNER2_Cdecl(INNER2 str1) { return MarshalStructAsParam_AsExpByRefINNER2_Cdecl(&str1); } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValINNER2_Stdcall(INNER2 str1) { return MarshalStructAsParam_AsExpByRefINNER2_Stdcall(&str1); } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValInnerExplicit_Cdecl(InnerExplicit str1) { return MarshalStructAsParam_AsExpByRefInnerExplicit_Cdecl(&str1); } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValInnerExplicit_Stdcall(InnerExplicit str1) { return MarshalStructAsParam_AsExpByRefInnerExplicit_Stdcall(&str1); } ///// extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValInnerArrayExplicit_Cdecl(InnerArrayExplicit str1) { return MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Cdecl(&str1); } extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValInnerArrayExplicit_Stdcall(InnerArrayExplicit str1) { return 
//----------method called byval----------//
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValINNER2_Cdecl(INNER2 str1) { return MarshalStructAsParam_AsExpByRefINNER2_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValINNER2_Stdcall(INNER2 str1) { return MarshalStructAsParam_AsExpByRefINNER2_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValInnerExplicit_Cdecl(InnerExplicit str1) { return MarshalStructAsParam_AsExpByRefInnerExplicit_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValInnerExplicit_Stdcall(InnerExplicit str1) { return MarshalStructAsParam_AsExpByRefInnerExplicit_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValInnerArrayExplicit_Cdecl(InnerArrayExplicit str1) { return MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValInnerArrayExplicit_Stdcall(InnerArrayExplicit str1) { return MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValOUTER3_Cdecl(OUTER3 str1) { return MarshalStructAsParam_AsExpByRefOUTER3_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValOUTER3_Stdcall(OUTER3 str1) { return MarshalStructAsParam_AsExpByRefOUTER3_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValU_Cdecl(U str1) { return MarshalStructAsParam_AsExpByRefU_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValU_Stdcall(U str1) { return MarshalStructAsParam_AsExpByRefU_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValByteStructPack2Explicit_Cdecl(ByteStructPack2Explicit str1) { return MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValByteStructPack2Explicit_Stdcall(ByteStructPack2Explicit str1) { return MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValShortStructPack4Explicit_Cdecl(ShortStructPack4Explicit str1) { return MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValShortStructPack4Explicit_Stdcall(ShortStructPack4Explicit str1) { return MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValIntStructPack8Explicit_Cdecl(IntStructPack8Explicit str1) { return MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValIntStructPack8Explicit_Stdcall(IntStructPack8Explicit str1) { return MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Stdcall(&str1); }
/////
extern "C" DLL_EXPORT BOOL _cdecl MarshalStructAsParam_AsExpByValLongStructPack16Explicit_Cdecl(LongStructPack16Explicit str1) { return MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Cdecl(&str1); }
extern "C" DLL_EXPORT BOOL __stdcall MarshalStructAsParam_AsExpByValLongStructPack16Explicit_Stdcall(LongStructPack16Explicit str1) { return MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Stdcall(&str1); }
/////
//---------------------------- ----------//
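// The Get_*_FuncPtr exports below return the entry points defined above so
// the managed side can bind them as delegates and repeat the same marshaling
// checks through a delegate-based P/Invoke path.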
//----------Delegate Pinvoke. PassByRef----------//
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_INNER2)(INNER2* inner);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_INNER2 _cdecl Get_MarshalStructAsParam_AsExpByRefINNER2_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefINNER2_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_INNER2)(INNER2* inner);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_INNER2 __stdcall Get_MarshalStructAsParam_AsExpByRefINNER2_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefINNER2_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_InnerExplicit)(InnerExplicit* ie);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_InnerExplicit _cdecl Get_MarshalStructAsParam_AsExpByRefInnerExplicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefInnerExplicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_InnerExplicit)(InnerExplicit* ie);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_InnerExplicit __stdcall Get_MarshalStructAsParam_AsExpByRefInnerExplicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefInnerExplicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_InnerArrayExplicit)(InnerArrayExplicit* iae);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_InnerArrayExplicit _cdecl Get_MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_InnerArrayExplicit)(InnerArrayExplicit* iae);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_InnerArrayExplicit __stdcall Get_MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefInnerArrayExplicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_OUTER3)(OUTER3* outer);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_OUTER3 _cdecl Get_MarshalStructAsParam_AsExpByRefOUTER3_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefOUTER3_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_OUTER3)(OUTER3* outer);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_OUTER3 __stdcall Get_MarshalStructAsParam_AsExpByRefOUTER3_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefOUTER3_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_U)(U* inner);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_U _cdecl Get_MarshalStructAsParam_AsExpByRefU_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefU_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_U)(U* inner);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_U __stdcall Get_MarshalStructAsParam_AsExpByRefU_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefU_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit* bspe);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_ByteStructPack2Explicit _cdecl Get_MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit* bspe);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_ByteStructPack2Explicit __stdcall Get_MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefByteStructPack2Explicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit* sspe);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_ShortStructPack4Explicit _cdecl Get_MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit* sspe);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_ShortStructPack4Explicit __stdcall Get_MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefShortStructPack4Explicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_IntStructPack8Explicit)(IntStructPack8Explicit* ispe);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_IntStructPack8Explicit _cdecl Get_MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_IntStructPack8Explicit)(IntStructPack8Explicit* ispe);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_IntStructPack8Explicit __stdcall Get_MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefIntStructPack8Explicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByRefCdeclCaller_LongStructPack16Explicit)(LongStructPack16Explicit* ispe);
extern "C" DLL_EXPORT DelegatePinvokeByRefCdeclCaller_LongStructPack16Explicit _cdecl Get_MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByRefStdcallCaller_LongStructPack16Explicit)(LongStructPack16Explicit* ispe);
extern "C" DLL_EXPORT DelegatePinvokeByRefStdcallCaller_LongStructPack16Explicit __stdcall Get_MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByRefLongStructPack16Explicit_Stdcall; }
/////
//---------------------------- ----------//
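// Illustrative native-side sketch (not compiled into this test): each getter
// above simply returns the corresponding exported entry point, so it can be
// exercised through the pointer as well. The INNER2 field values below are
// assumptions inferred from the IsCorrect* and memcmp checks in this file.
//
//     DelegatePinvokeByRefCdeclCaller_INNER2 fn =
//         Get_MarshalStructAsParam_AsExpByRefINNER2_Cdecl_FuncPtr();
//     INNER2 inner;
//     inner.f1 = 1;
//     inner.f2 = 1.0;
//     inner.f3 = GetSomeString();   // evidently "some string"
//     BOOL ok = fn(&inner);         // same checks as calling the export directly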
//----------Delegate Pinvoke. PassByVal----------//
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_INNER2)(INNER2 inner);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_INNER2 _cdecl Get_MarshalStructAsParam_AsExpByValINNER2_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValINNER2_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_INNER2)(INNER2 inner);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_INNER2 __stdcall Get_MarshalStructAsParam_AsExpByValINNER2_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValINNER2_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_InnerExplicit)(InnerExplicit ie);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_InnerExplicit _cdecl Get_MarshalStructAsParam_AsExpByValInnerExplicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValInnerExplicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_InnerExplicit)(InnerExplicit ie);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_InnerExplicit __stdcall Get_MarshalStructAsParam_AsExpByValInnerExplicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValInnerExplicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_InnerArrayExplicit)(InnerArrayExplicit iae);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_InnerArrayExplicit _cdecl Get_MarshalStructAsParam_AsExpByValInnerArrayExplicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValInnerArrayExplicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_InnerArrayExplicit)(InnerArrayExplicit iae);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_InnerArrayExplicit __stdcall Get_MarshalStructAsParam_AsExpByValInnerArrayExplicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValInnerArrayExplicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_OUTER3)(OUTER3 outer);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_OUTER3 _cdecl Get_MarshalStructAsParam_AsExpByValOUTER3_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValOUTER3_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_OUTER3)(OUTER3 outer);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_OUTER3 __stdcall Get_MarshalStructAsParam_AsExpByValOUTER3_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValOUTER3_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_U)(U inner);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_U _cdecl Get_MarshalStructAsParam_AsExpByValU_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValU_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_U)(U inner);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_U __stdcall Get_MarshalStructAsParam_AsExpByValU_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValU_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit bspe);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_ByteStructPack2Explicit _cdecl Get_MarshalStructAsParam_AsExpByValByteStructPack2Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValByteStructPack2Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit bspe);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_ByteStructPack2Explicit __stdcall Get_MarshalStructAsParam_AsExpByValByteStructPack2Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValByteStructPack2Explicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit sspe);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_ShortStructPack4Explicit _cdecl Get_MarshalStructAsParam_AsExpByValShortStructPack4Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValShortStructPack4Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit sspe);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_ShortStructPack4Explicit __stdcall Get_MarshalStructAsParam_AsExpByValShortStructPack4Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValShortStructPack4Explicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_IntStructPack8Explicit)(IntStructPack8Explicit ispe);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_IntStructPack8Explicit _cdecl Get_MarshalStructAsParam_AsExpByValIntStructPack8Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValIntStructPack8Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_IntStructPack8Explicit)(IntStructPack8Explicit ispe);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_IntStructPack8Explicit __stdcall Get_MarshalStructAsParam_AsExpByValIntStructPack8Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValIntStructPack8Explicit_Stdcall; }
/////
typedef BOOL (_cdecl *DelegatePinvokeByValCdeclCaller_LongStructPack16Explicit)(LongStructPack16Explicit ispe);
extern "C" DLL_EXPORT DelegatePinvokeByValCdeclCaller_LongStructPack16Explicit _cdecl Get_MarshalStructAsParam_AsExpByValLongStructPack16Explicit_Cdecl_FuncPtr() { return MarshalStructAsParam_AsExpByValLongStructPack16Explicit_Cdecl; }
typedef BOOL (__stdcall *DelegatePinvokeByValStdcallCaller_LongStructPack16Explicit)(LongStructPack16Explicit ispe);
extern "C" DLL_EXPORT DelegatePinvokeByValStdcallCaller_LongStructPack16Explicit __stdcall Get_MarshalStructAsParam_AsExpByValLongStructPack16Explicit_Stdcall_FuncPtr() { return MarshalStructAsParam_AsExpByValLongStructPack16Explicit_Stdcall; }
/////
//---------------------------- ----------//
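// In the by-ref callback drivers below, the native side seeds each struct with
// "native" values (77 / 77.0 / "Native"), passes a pointer to the managed
// callback, and then verifies that the callback wrote the expected values back
// through that pointer. Note the TP_CoreClrFree cleanup calls are left
// commented out, so the replacement strings are not freed here.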
//----------Reverse Pinvoke. PassByRef----------//
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_INNER2)(INNER2* inner2);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_INNER2(ByRefCdeclCaller_INNER2 caller)
{
    // init
    INNER2 inner2;
    inner2.f1 = 77;
    inner2.f2 = 77.0;
    char* pstr = GetNativeString();
    inner2.f3 = pstr;
    if (!caller(&inner2)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectINNER2(&inner2)) { PRINT_ERR_INFO(); return FALSE; }
    //TP_CoreClrFree((void*)inner2.f3);
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_INNER2)(INNER2* inner2);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_INNER2(ByRefStdcallCaller_INNER2 caller)
{
    const char* lNativeStr = "Native";
    const size_t lsize = strlen(lNativeStr);
    // init
    INNER2 inner2;
    inner2.f1 = 77;
    inner2.f2 = 77.0;
    char* pstr = (char*)CoreClrAlloc(lsize + 1);
    memset(pstr, 0, lsize + 1);
    strncpy_s(pstr, lsize + 1, lNativeStr, lsize);
    inner2.f3 = pstr;
    if (!caller(&inner2)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectINNER2(&inner2)) { PRINT_ERR_INFO(); return FALSE; }
    //TP_CoreClrFree((void*)inner2.f3);
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_InnerExplicit)(InnerExplicit* inner2);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_InnerExplicit(ByRefCdeclCaller_InnerExplicit caller)
{
    const char* lNativeStr = "Native";
    const size_t lsize = strlen(lNativeStr);
    // init
    InnerExplicit ie;
    ie.f1 = 77;
    char* pstr = (char*)CoreClrAlloc(lsize + 1);
    memset(pstr, 0, lsize + 1);
    strncpy_s(pstr, lsize + 1, lNativeStr, lsize);
    ie.f3 = pstr;
    if (!caller(&ie)) { PRINT_ERR_INFO(); return FALSE; }
    if (ie.f1 != 1 || 0 != strcmp((char*)ie.f3, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_InnerExplicit)(InnerExplicit* inner2);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_InnerExplicit(ByRefStdcallCaller_InnerExplicit caller)
{
    const char* lNativeStr = "Native";
    const size_t lsize = strlen(lNativeStr);
    // init
    InnerExplicit ie;
    ie.f1 = 77;
    char* pstr = (char*)CoreClrAlloc(lsize + 1);
    memset(pstr, 0, lsize + 1);
    strncpy_s(pstr, lsize + 1, lNativeStr, lsize);
    ie.f3 = pstr;
    if (!caller(&ie)) { PRINT_ERR_INFO(); return FALSE; }
    if (ie.f1 != 1 || 0 != strcmp((char*)ie.f3, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_InnerArrayExplicit)(InnerArrayExplicit* iae);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_InnerArrayExplicit(ByRefCdeclCaller_InnerArrayExplicit caller)
{
    const char* lNativeStr = "Native";
    const size_t lsize = strlen(lNativeStr);
    // init
    InnerArrayExplicit iae;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        iae.arr[i].f1 = 77;
        str = (LPSTR)CoreClrAlloc(lsize + 1);
        memset(str, 0, lsize + 1);
        strncpy_s((char*)str, lsize + 1, lNativeStr, lsize);
        iae.arr[i].f3 = str;
        str = NULL;
    }
    str = (LPSTR)CoreClrAlloc(lsize + 1);
    memset(str, 0, lsize + 1);
    strncpy_s((char*)str, lsize + 1, lNativeStr, lsize);
    iae.f4 = str;
    if (!caller(&iae)) { PRINT_ERR_INFO(); return FALSE; }
    for (size_t i = 0; i < NumArrElements; i++)
    {
        if (iae.arr[i].f1 != 1 || 0 != strcmp((char*)iae.arr[i].f3, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    }
    if (0 != strcmp((char*)iae.f4, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_InnerArrayExplicit)(InnerArrayExplicit* iae);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_InnerArrayExplicit(ByRefStdcallCaller_InnerArrayExplicit caller)
{
    // init
    InnerArrayExplicit iae;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        iae.arr[i].f1 = 77;
        str = GetNativeString();
        iae.arr[i].f3 = str;
        str = NULL;
    }
    str = GetNativeString();
    iae.f4 = str;
    if (!caller(&iae)) { PRINT_ERR_INFO(); return FALSE; }
    for (size_t i = 0; i < NumArrElements; i++)
    {
        if (iae.arr[i].f1 != 1 || 0 != strcmp((char*)iae.arr[i].f3, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    }
    if (0 != strcmp((char*)iae.f4, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_OUTER3)(OUTER3* outer3);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_OUTER3(ByRefCdeclCaller_OUTER3 caller)
{
    // init
    OUTER3 outer3;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        outer3.arr[i].f1 = 77;
        outer3.arr[i].f2 = 77.0;
        str = GetNativeString();
        outer3.arr[i].f3 = (LPCSTR)str;
        str = NULL;
    }
    str = GetNativeString();
    outer3.f4 = (LPCSTR)str;
    if (!caller(&outer3)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectOUTER3(&outer3)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_OUTER3)(OUTER3* outer3);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_OUTER3(ByRefStdcallCaller_OUTER3 caller)
{
    // init
    OUTER3 outer3;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        outer3.arr[i].f1 = 77;
        outer3.arr[i].f2 = 77.0;
        str = GetNativeString();
        outer3.arr[i].f3 = (LPCSTR)str;
        str = NULL;
    }
    str = GetNativeString();
    outer3.f4 = (LPCSTR)str;
    if (!caller(&outer3)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectOUTER3(&outer3)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_U)(U* u);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_U(ByRefCdeclCaller_U caller)
{
    U u;
    u.d = 1.23;
    if (!caller(&u)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectU(&u)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_U)(U* u);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_U(ByRefStdcallCaller_U caller)
{
    U u;
    u.d = 1.23;
    if (!caller(&u)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectU(&u)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit* bspe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_ByteStructPack2Explicit(ByRefCdeclCaller_ByteStructPack2Explicit caller)
{
    ByteStructPack2Explicit bspe;
    bspe.b1 = 64;
    bspe.b2 = 64;
    if (!caller(&bspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectByteStructPack2Explicit(&bspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit* bspe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_ByteStructPack2Explicit(ByRefStdcallCaller_ByteStructPack2Explicit caller)
{
    ByteStructPack2Explicit bspe;
    bspe.b1 = 64;
    bspe.b2 = 64;
    if (!caller(&bspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectByteStructPack2Explicit(&bspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit* sspe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_ShortStructPack4Explicit(ByRefCdeclCaller_ShortStructPack4Explicit caller)
{
    ShortStructPack4Explicit sspe;
    sspe.s1 = 64;
    sspe.s2 = 64;
    if (!caller(&sspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectShortStructPack4Explicit(&sspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit* sspe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_ShortStructPack4Explicit(ByRefStdcallCaller_ShortStructPack4Explicit caller)
{
    ShortStructPack4Explicit sspe;
    sspe.s1 = 64;
    sspe.s2 = 64;
    if (!caller(&sspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectShortStructPack4Explicit(&sspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_IntStructPack8Explicit)(IntStructPack8Explicit* ispe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_IntStructPack8Explicit(ByRefCdeclCaller_IntStructPack8Explicit caller)
{
    IntStructPack8Explicit ispe;
    ispe.i1 = 64;
    ispe.i2 = 64;
    if (!caller(&ispe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectIntStructPack8Explicit(&ispe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_IntStructPack8Explicit)(IntStructPack8Explicit* ispe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_IntStructPack8Explicit(ByRefStdcallCaller_IntStructPack8Explicit caller)
{
    IntStructPack8Explicit ispe;
    ispe.i1 = 64;
    ispe.i2 = 64;
    if (!caller(&ispe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectIntStructPack8Explicit(&ispe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByRefCdeclCaller_LongStructPack16Explicit)(LongStructPack16Explicit* lspe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByRefStruct_Cdecl_LongStructPack16Explicit(ByRefCdeclCaller_LongStructPack16Explicit caller)
{
    LongStructPack16Explicit lspe;
    lspe.l1 = 64;
    lspe.l2 = 64;
    if (!caller(&lspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectLongStructPack16Explicit(&lspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByRefStdcallCaller_LongStructPack16Explicit)(LongStructPack16Explicit* lspe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByRefStruct_Stdcall_LongStructPack16Explicit(ByRefStdcallCaller_LongStructPack16Explicit caller)
{
    LongStructPack16Explicit lspe;
    lspe.l1 = 64;
    lspe.l2 = 64;
    if (!caller(&lspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectLongStructPack16Explicit(&lspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
//---------------------------- ----------//
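// The by-val drivers below hand the managed callback a copy of the struct, so
// the native original must come back unchanged: each driver seeds the struct,
// invokes the callback by value, and then re-verifies the original afterwards.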
//----------Reverse Pinvoke. PassByVal----------//
/////
typedef BOOL (_cdecl *ByValCdeclCaller_INNER2)(INNER2 inner2);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_INNER2(ByValCdeclCaller_INNER2 caller)
{
    // init
    INNER2 inner2;
    inner2.f1 = 1;
    inner2.f2 = 1.0;
    char* pstr = GetSomeString();
    inner2.f3 = pstr;
    if (!caller(inner2)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectINNER2(&inner2)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_INNER2)(INNER2 inner2);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_INNER2(ByValStdcallCaller_INNER2 caller)
{
    // init
    INNER2 inner2;
    inner2.f1 = 1;
    inner2.f2 = 1.0;
    char* pstr = GetSomeString();
    inner2.f3 = pstr;
    if (!caller(inner2)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectINNER2(&inner2)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_InnerExplicit)(InnerExplicit inner2);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_InnerExplicit(ByValCdeclCaller_InnerExplicit caller)
{
    // init
    InnerExplicit ie;
    ie.f1 = 1;
    char* pstr = GetNativeString();
    ie.f3 = pstr;
    if (!caller(ie)) { PRINT_ERR_INFO(); return FALSE; }
    if (ie.f1 != 1 || 0 != strcmp((char*)ie.f3, (char*)NativeStr)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_InnerExplicit)(InnerExplicit inner2);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_InnerExplicit(ByValStdcallCaller_InnerExplicit caller)
{
    // init
    InnerExplicit ie;
    ie.f1 = 1;
    char* pstr = GetNativeString();
    ie.f3 = pstr;
    if (!caller(ie)) { PRINT_ERR_INFO(); return FALSE; }
    if (ie.f1 != 1 || 0 != strcmp((char*)ie.f3, (char*)NativeStr)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_InnerArrayExplicit)(InnerArrayExplicit iae);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_InnerArrayExplicit(ByValCdeclCaller_InnerArrayExplicit caller)
{
    // init
    InnerArrayExplicit iae;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        iae.arr[i].f1 = 1;
        str = GetSomeString();
        iae.arr[i].f3 = str;
        str = NULL;
    }
    str = GetSomeString();
    iae.f4 = str;
    if (!caller(iae)) { PRINT_ERR_INFO(); return FALSE; }
    for (size_t i = 0; i < NumArrElements; i++)
    {
        if (iae.arr[i].f1 != 1 || 0 != strcmp((char*)iae.arr[i].f3, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    }
    if (0 != strcmp((char*)iae.f4, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_InnerArrayExplicit)(InnerArrayExplicit iae);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_InnerArrayExplicit(ByValStdcallCaller_InnerArrayExplicit caller)
{
    // init
    InnerArrayExplicit iae;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        iae.arr[i].f1 = 1;
        str = GetSomeString();
        iae.arr[i].f3 = str;
        str = NULL;
    }
    str = GetSomeString();
    iae.f4 = str;
    if (!caller(iae)) { PRINT_ERR_INFO(); return FALSE; }
    for (size_t i = 0; i < NumArrElements; i++)
    {
        if (iae.arr[i].f1 != 1 || 0 != strcmp((char*)iae.arr[i].f3, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    }
    if (0 != strcmp((char*)iae.f4, "some string")) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_OUTER3)(OUTER3 outer3);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_OUTER3(ByValCdeclCaller_OUTER3 caller)
{
    // init
    OUTER3 outer3;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        outer3.arr[i].f1 = 1;
        outer3.arr[i].f2 = 1.0;
        str = GetSomeString();
        outer3.arr[i].f3 = (LPCSTR)str;
        str = NULL;
    }
    str = GetSomeString();
    outer3.f4 = (LPCSTR)str;
    if (!caller(outer3)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectOUTER3(&outer3)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_OUTER3)(OUTER3 outer3);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_OUTER3(ByValStdcallCaller_OUTER3 caller)
{
    // init
    OUTER3 outer3;
    LPSTR str = NULL;
    for (size_t i = 0; i < NumArrElements; i++)
    {
        outer3.arr[i].f1 = 1;
        outer3.arr[i].f2 = 1.0;
        str = GetSomeString();
        outer3.arr[i].f3 = (LPCSTR)str;
        str = NULL;
    }
    str = GetSomeString();
    outer3.f4 = (LPCSTR)str;
    if (!caller(outer3)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectOUTER3(&outer3)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_U)(U u);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_U(ByValCdeclCaller_U caller)
{
    U u;
    u.d = 3.2;
    if (!caller(u)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectU(&u)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_U)(U u);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_U(ByValStdcallCaller_U caller)
{
    U u;
    u.d = 3.2;
    if (!caller(u)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectU(&u)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit bspe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_ByteStructPack2Explicit(ByValCdeclCaller_ByteStructPack2Explicit caller)
{
    ByteStructPack2Explicit bspe;
    bspe.b1 = 32;
    bspe.b2 = 32;
    if (!caller(bspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectByteStructPack2Explicit(&bspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit bspe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_ByteStructPack2Explicit(ByValStdcallCaller_ByteStructPack2Explicit caller)
{
    ByteStructPack2Explicit bspe;
    bspe.b1 = 32;
    bspe.b2 = 32;
    if (!caller(bspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectByteStructPack2Explicit(&bspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit sspe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_ShortStructPack4Explicit(ByValCdeclCaller_ShortStructPack4Explicit caller)
{
    ShortStructPack4Explicit sspe;
    sspe.s1 = 32;
    sspe.s2 = 32;
    if (!caller(sspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectShortStructPack4Explicit(&sspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit sspe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_ShortStructPack4Explicit(ByValStdcallCaller_ShortStructPack4Explicit caller)
{
    ShortStructPack4Explicit sspe;
    sspe.s1 = 32;
    sspe.s2 = 32;
    if (!caller(sspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectShortStructPack4Explicit(&sspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_IntStructPack8Explicit)(IntStructPack8Explicit ispe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_IntStructPack8Explicit(ByValCdeclCaller_IntStructPack8Explicit caller)
{
    IntStructPack8Explicit ispe;
    ispe.i1 = 32;
    ispe.i2 = 32;
    if (!caller(ispe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectIntStructPack8Explicit(&ispe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_IntStructPack8Explicit)(IntStructPack8Explicit ispe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_IntStructPack8Explicit(ByValStdcallCaller_IntStructPack8Explicit caller)
{
    IntStructPack8Explicit ispe;
    ispe.i1 = 32;
    ispe.i2 = 32;
    if (!caller(ispe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectIntStructPack8Explicit(&ispe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
typedef BOOL (_cdecl *ByValCdeclCaller_LongStructPack16Explicit)(LongStructPack16Explicit lspe);
extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_LongStructPack16Explicit(ByValCdeclCaller_LongStructPack16Explicit caller)
{
    LongStructPack16Explicit lspe;
    lspe.l1 = 32;
    lspe.l2 = 32;
    if (!caller(lspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectLongStructPack16Explicit(&lspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
typedef BOOL (__stdcall *ByValStdcallCaller_LongStructPack16Explicit)(LongStructPack16Explicit lspe);
extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_LongStructPack16Explicit(ByValStdcallCaller_LongStructPack16Explicit caller)
{
    LongStructPack16Explicit lspe;
    lspe.l1 = 32;
    lspe.l2 = 32;
    if (!caller(lspe)) { PRINT_ERR_INFO(); return FALSE; }
    if (!IsCorrectLongStructPack16Explicit(&lspe)) { PRINT_ERR_INFO(); return FALSE; }
    return TRUE;
}
/////
//---------------------------- ----------//
PassByVal---------// ///// typedef BOOL (_cdecl *ByValCdeclCaller_INNER2)(INNER2 inner2); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_INNER2(ByValCdeclCaller_INNER2 caller) { //init INNER2 inner2; inner2.f1 = 1; inner2.f2 = 1.0; char* pstr = GetSomeString(); inner2.f3 = pstr; if(!caller(inner2)) { PRINT_ERR_INFO(); return FALSE; } if(!IsCorrectINNER2(&inner2)) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_INNER2)(INNER2 inner2); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_INNER2(ByValStdcallCaller_INNER2 caller) { //init INNER2 inner2; inner2.f1 = 1; inner2.f2 = 1.0; char* pstr = GetSomeString(); inner2.f3 = pstr; if(!caller(inner2)) { PRINT_ERR_INFO(); return FALSE; } if(!IsCorrectINNER2(&inner2)) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_InnerExplicit)(InnerExplicit inner2); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_InnerExplicit(ByValCdeclCaller_InnerExplicit caller) { //init InnerExplicit ie; ie.f1 = 1; char* pstr = GetNativeString(); ie.f3 = pstr; if(!caller(ie)) { PRINT_ERR_INFO(); return FALSE; } if( ie.f1 != 1 || 0 != strcmp((char*)ie.f3, (char*)NativeStr) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_InnerExplicit)(InnerExplicit inner2); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_InnerExplicit(ByValStdcallCaller_InnerExplicit caller) { //init InnerExplicit ie; ie.f1 = 1; char* pstr = GetNativeString(); ie.f3 = pstr; if(!caller(ie)) { PRINT_ERR_INFO(); return FALSE; } if( ie.f1 != 1 || 0 != strcmp((char*)ie.f3, (char*)NativeStr) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_InnerArrayExplicit)(InnerArrayExplicit iae); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_InnerArrayExplicit(ByValCdeclCaller_InnerArrayExplicit caller) { //init InnerArrayExplicit iae; LPSTR str = NULL; for( size_t i = 0; i < NumArrElements; i++ ) { iae.arr[i].f1 = 1; str = GetSomeString(); iae.arr[i].f3 = str; str = NULL; } str = GetSomeString(); iae.f4 = str; if(!caller(iae)) { PRINT_ERR_INFO(); return FALSE; } for( size_t i = 0; i < NumArrElements; i++ ) { if( iae.arr[i].f1 != 1 || 0 != strcmp((char*)iae.arr[i].f3, "some string")) { PRINT_ERR_INFO(); return FALSE; } } if( 0 != strcmp((char*)iae.f4, "some string") ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_InnerArrayExplicit)(InnerArrayExplicit iae); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_InnerArrayExplicit(ByValStdcallCaller_InnerArrayExplicit caller) { //init InnerArrayExplicit iae; LPSTR str = NULL; for( size_t i = 0; i < NumArrElements; i++ ) { iae.arr[i].f1 = 1; str = GetSomeString(); iae.arr[i].f3 = str; str = NULL; } str = GetSomeString(); iae.f4 = str; if(!caller(iae)) { PRINT_ERR_INFO(); return FALSE; } for( size_t i = 0; i < NumArrElements; i++ ) { if( iae.arr[i].f1 != 1 || 0 != strcmp((char*)iae.arr[i].f3, "some string")) { PRINT_ERR_INFO(); return FALSE; } } if( 0 != strcmp((char*)iae.f4, "some string") ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_OUTER3)(OUTER3 outer3); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_OUTER3(ByValCdeclCaller_OUTER3 caller) { //init OUTER3 outer3; LPSTR str = NULL; for( size_t i = 0; i < NumArrElements; i++ ) { 
outer3.arr[i].f1 = 1; outer3.arr[i].f2 = 1.0; str = GetSomeString(); outer3.arr[i].f3 = (LPCSTR)str; str = NULL; } str = GetSomeString(); outer3.f4 = (LPCSTR)str; if(!caller(outer3)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectOUTER3( &outer3 ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_OUTER3)(OUTER3 outer3); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_OUTER3(ByValStdcallCaller_OUTER3 caller) { //init OUTER3 outer3; LPSTR str = NULL; for( size_t i = 0; i < NumArrElements; i++ ) { outer3.arr[i].f1 = 1; outer3.arr[i].f2 = 1.0; str = GetSomeString(); outer3.arr[i].f3 = (LPCSTR)str; str = NULL; } str = GetSomeString(); outer3.f4 = (LPCSTR)str; if(!caller(outer3)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectOUTER3( &outer3 ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_U)(U u); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_U(ByValCdeclCaller_U caller) { U u; u.d = 3.2; if(!caller(u)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectU( &u ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_U)(U u); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_U(ByValStdcallCaller_U caller) { U u; u.d = 3.2; if(!caller(u)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectU( &u ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit bspe); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_ByteStructPack2Explicit(ByValCdeclCaller_ByteStructPack2Explicit caller) { ByteStructPack2Explicit bspe; bspe.b1 = 32; bspe.b2 = 32; if(!caller(bspe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectByteStructPack2Explicit( &bspe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_ByteStructPack2Explicit)(ByteStructPack2Explicit bspe); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_ByteStructPack2Explicit(ByValStdcallCaller_ByteStructPack2Explicit caller) { ByteStructPack2Explicit bspe; bspe.b1 = 32; bspe.b2 = 32; if(!caller(bspe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectByteStructPack2Explicit( &bspe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit sspe); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_ShortStructPack4Explicit(ByValCdeclCaller_ShortStructPack4Explicit caller) { ShortStructPack4Explicit sspe; sspe.s1 = 32; sspe.s2 = 32; if(!caller(sspe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectShortStructPack4Explicit( &sspe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_ShortStructPack4Explicit)(ShortStructPack4Explicit sspe); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_ShortStructPack4Explicit(ByValStdcallCaller_ShortStructPack4Explicit caller) { ShortStructPack4Explicit sspe; sspe.s1 = 32; sspe.s2 = 32; if(!caller(sspe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectShortStructPack4Explicit( &sspe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_IntStructPack8Explicit)(IntStructPack8Explicit ispe); extern "C" DLL_EXPORT BOOL _cdecl 
DoCallBack_MarshalByValStruct_Cdecl_IntStructPack8Explicit(ByValCdeclCaller_IntStructPack8Explicit caller) { IntStructPack8Explicit ispe; ispe.i1 = 32; ispe.i2 = 32; if(!caller(ispe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectIntStructPack8Explicit( &ispe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_IntStructPack8Explicit)(IntStructPack8Explicit ispe); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_IntStructPack8Explicit(ByValStdcallCaller_IntStructPack8Explicit caller) { IntStructPack8Explicit ispe; ispe.i1 = 32; ispe.i2 = 32; if(!caller(ispe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectIntStructPack8Explicit( &ispe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// typedef BOOL (_cdecl *ByValCdeclCaller_LongStructPack16Explicit)(LongStructPack16Explicit lspe); extern "C" DLL_EXPORT BOOL _cdecl DoCallBack_MarshalByValStruct_Cdecl_LongStructPack16Explicit(ByValCdeclCaller_LongStructPack16Explicit caller) { LongStructPack16Explicit lspe; lspe.l1 = 32; lspe.l2 = 32; if(!caller(lspe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectLongStructPack16Explicit( &lspe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } typedef BOOL (__stdcall *ByValStdcallCaller_LongStructPack16Explicit)(LongStructPack16Explicit lspe); extern "C" DLL_EXPORT BOOL __stdcall DoCallBack_MarshalByValStruct_Stdcall_LongStructPack16Explicit(ByValStdcallCaller_LongStructPack16Explicit caller) { LongStructPack16Explicit lspe; lspe.l1 = 32; lspe.l2 = 32; if(!caller(lspe)) { PRINT_ERR_INFO(); return FALSE; } if( !IsCorrectLongStructPack16Explicit( &lspe ) ) { PRINT_ERR_INFO(); return FALSE; } return TRUE; } ///// //---------------------------- ----------//
-1
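For orientation, the exports in the marshaling blob above all follow one reverse-P/Invoke pattern: the native side receives a managed callback, initializes a struct, invokes the callback, and validates the struct contents afterwards. A minimal managed-side counterpart for one of those exports might look like the C# sketch below; only the export name, the cdecl callback shape, and the expected value 3.2 come from the blob, while the library name "MarshalStructAsParam" and the field layout of U are illustrative assumptions.

```csharp
using System;
using System.Runtime.InteropServices;

// Union-style struct; overlapping an int and a double mirrors a typical
// native union U, but the exact layout here is an assumption for this sketch.
[StructLayout(LayoutKind.Explicit)]
struct U
{
    [FieldOffset(0)] public int i;
    [FieldOffset(0)] public double d;
}

static class ReversePInvokeSketch
{
    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
    delegate bool ByValCdeclCaller_U(U u);

    // "MarshalStructAsParam" is a placeholder library name.
    [DllImport("MarshalStructAsParam", CallingConvention = CallingConvention.Cdecl)]
    static extern bool DoCallBack_MarshalByValStruct_Cdecl_U(ByValCdeclCaller_U caller);

    // The native export sets u.d = 3.2 before invoking this callback.
    static bool ManagedCallback(U u) => Math.Abs(u.d - 3.2) < 1e-9;

    static void Main() =>
        Console.WriteLine(DoCallBack_MarshalByValStruct_Cdecl_U(ManagedCallback));
}
```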
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/coreclr/pal/tests/palsuite/c_runtime/fwprintf/test17/test17.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test17.c ** ** Purpose: Tests the lowercase shorthand notation double specifier (%g). ** This test is modeled after the sprintf series. ** ** **==========================================================================*/ #include <palsuite.h> #include "../fwprintf.h" /* * Depends on memcmp, strlen, fopen, fseek and fgets. */ PALTEST(c_runtime_fwprintf_test17_paltest_fwprintf_test17, "c_runtime/fwprintf/test17/paltest_fwprintf_test17") { double val = 2560.001; double neg = -2560.001; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoDoubleTest(convert("foo %g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %lg"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %hg"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %Lg"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %I64g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %5g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %-5g"), val, "foo 2560 ", "foo 2560 "); DoDoubleTest(convert("foo %.1g"), val, "foo 3e+003", "foo 3e+03"); DoDoubleTest(convert("foo %.2g"), val, "foo 2.6e+003", "foo 2.6e+03"); DoDoubleTest(convert("foo %.12g"), val, "foo 2560.001", "foo 2560.001"); DoDoubleTest(convert("foo %06g"), val, "foo 002560", "foo 002560"); DoDoubleTest(convert("foo %#g"), val, "foo 2560.00", "foo 2560.00"); DoDoubleTest(convert("foo %+g"), val, "foo +2560", "foo +2560"); DoDoubleTest(convert("foo % g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %+g"), neg, "foo -2560", "foo -2560"); DoDoubleTest(convert("foo % g"), neg, "foo -2560", "foo -2560"); PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test17.c ** ** Purpose: Tests the lowercase shorthand notation double specifier (%g). ** This test is modeled after the sprintf series. ** ** **==========================================================================*/ #include <palsuite.h> #include "../fwprintf.h" /* * Depends on memcmp, strlen, fopen, fseek and fgets. */ PALTEST(c_runtime_fwprintf_test17_paltest_fwprintf_test17, "c_runtime/fwprintf/test17/paltest_fwprintf_test17") { double val = 2560.001; double neg = -2560.001; if (PAL_Initialize(argc, argv) != 0) { return(FAIL); } DoDoubleTest(convert("foo %g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %lg"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %hg"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %Lg"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %I64g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %5g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %-5g"), val, "foo 2560 ", "foo 2560 "); DoDoubleTest(convert("foo %.1g"), val, "foo 3e+003", "foo 3e+03"); DoDoubleTest(convert("foo %.2g"), val, "foo 2.6e+003", "foo 2.6e+03"); DoDoubleTest(convert("foo %.12g"), val, "foo 2560.001", "foo 2560.001"); DoDoubleTest(convert("foo %06g"), val, "foo 002560", "foo 002560"); DoDoubleTest(convert("foo %#g"), val, "foo 2560.00", "foo 2560.00"); DoDoubleTest(convert("foo %+g"), val, "foo +2560", "foo +2560"); DoDoubleTest(convert("foo % g"), val, "foo 2560", "foo 2560"); DoDoubleTest(convert("foo %+g"), neg, "foo -2560", "foo -2560"); DoDoubleTest(convert("foo % g"), neg, "foo -2560", "foo -2560"); PAL_Terminate(); return PASS; }
-1
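The PR description repeated through these records lists three ARM32 conditions under which the JIT keeps the regular call path instead of a fast tail call. The hypothetical C# scenario below illustrates the first condition (a struct argument split between the last core registers and the stack); the struct size and the register assignment are illustrative assumptions, not details taken from the PR.

```csharp
using System;

// 24 bytes: on ARM32 an argument like this can end up partly in registers
// and partly on the stack, i.e. a "split" struct argument.
struct Big
{
    public long A, B, C;
}

class TailCallSketch
{
    static long Callee(int r0, int r1, int r2, Big b) => b.A + b.B + b.C;

    // 'Callee' is invoked in tail position; whether this becomes a fast
    // tail call is exactly the kind of decision the PR's new checks gate.
    static long Caller(int x, Big b) => Callee(x, x + 1, x + 2, b);

    static void Main() =>
        Console.WriteLine(Caller(1, new Big { A = 1, B = 2, C = 3 }));
}
```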
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/native/eventpipe/ep-config-internals.h
#ifndef __EVENTPIPE_CONFIGURATION_INTERNALS_H__ #define __EVENTPIPE_CONFIGURATION_INTERNALS_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" /* * EventPipeConfiguration internal library functions. */ // _Requires_lock_held (config) EventPipeSessionProvider * config_get_session_provider ( const EventPipeConfiguration *config, const EventPipeSession *session, const EventPipeProvider *provider); // _Requires_lock_held (config) EventPipeProvider * config_get_provider ( EventPipeConfiguration *config, const ep_char8_t *name); // _Requires_lock_held (config) EventPipeProvider * config_create_provider ( EventPipeConfiguration *config, const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data, EventPipeProviderCallbackDataQueue *provider_callback_data_queue); // _Requires_lock_held (config) void config_delete_provider ( EventPipeConfiguration *config, EventPipeProvider *provider); // _Requires_lock_held (config) void config_delete_deferred_providers (EventPipeConfiguration *config); // _Requires_lock_held (config) void config_enable_disable ( EventPipeConfiguration *config, const EventPipeSession *session, EventPipeProviderCallbackDataQueue *provider_callback_data_queue, bool enable); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_CONFIGURATION_INTERNALS_H__ */
#ifndef __EVENTPIPE_CONFIGURATION_INTERNALS_H__ #define __EVENTPIPE_CONFIGURATION_INTERNALS_H__ #include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #include "ep-types.h" /* * EventPipeConfiguration internal library functions. */ // _Requires_lock_held (config) EventPipeSessionProvider * config_get_session_provider ( const EventPipeConfiguration *config, const EventPipeSession *session, const EventPipeProvider *provider); // _Requires_lock_held (config) EventPipeProvider * config_get_provider ( EventPipeConfiguration *config, const ep_char8_t *name); // _Requires_lock_held (config) EventPipeProvider * config_create_provider ( EventPipeConfiguration *config, const ep_char8_t *provider_name, EventPipeCallback callback_func, EventPipeCallbackDataFree callback_data_free_func, void *callback_data, EventPipeProviderCallbackDataQueue *provider_callback_data_queue); // _Requires_lock_held (config) void config_delete_provider ( EventPipeConfiguration *config, EventPipeProvider *provider); // _Requires_lock_held (config) void config_delete_deferred_providers (EventPipeConfiguration *config); // _Requires_lock_held (config) void config_enable_disable ( EventPipeConfiguration *config, const EventPipeSession *session, EventPipeProviderCallbackDataQueue *provider_callback_data_queue, bool enable); #endif /* ENABLE_PERFTRACING */ #endif /* __EVENTPIPE_CONFIGURATION_INTERNALS_H__ */
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/libraries/System.Net.Http/src/System/Net/Http/Headers/TransferCodingWithQualityHeaderValue.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics.CodeAnalysis; namespace System.Net.Http.Headers { public sealed class TransferCodingWithQualityHeaderValue : TransferCodingHeaderValue, ICloneable { public double? Quality { get => HeaderUtilities.GetQuality((UnvalidatedObjectCollection<NameValueHeaderValue>)Parameters); set => HeaderUtilities.SetQuality((UnvalidatedObjectCollection<NameValueHeaderValue>)Parameters, value); } internal TransferCodingWithQualityHeaderValue() { // Used by the parser to create a new instance of this type. } public TransferCodingWithQualityHeaderValue(string value) : base(value) { } public TransferCodingWithQualityHeaderValue(string value, double quality) : base(value) { Quality = quality; } private TransferCodingWithQualityHeaderValue(TransferCodingWithQualityHeaderValue source) : base(source) { // No additional members to initialize here. This constructor is used by Clone(). } object ICloneable.Clone() { return new TransferCodingWithQualityHeaderValue(this); } public static new TransferCodingWithQualityHeaderValue Parse(string? input) { int index = 0; return (TransferCodingWithQualityHeaderValue)TransferCodingHeaderParser.SingleValueWithQualityParser .ParseValue(input, null, ref index); } public static bool TryParse([NotNullWhen(true)] string? input, [NotNullWhen(true)] out TransferCodingWithQualityHeaderValue? parsedValue) { int index = 0; parsedValue = null; if (TransferCodingHeaderParser.SingleValueWithQualityParser.TryParseValue( input, null, ref index, out object? output)) { parsedValue = (TransferCodingWithQualityHeaderValue)output!; return true; } return false; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Diagnostics.CodeAnalysis; namespace System.Net.Http.Headers { public sealed class TransferCodingWithQualityHeaderValue : TransferCodingHeaderValue, ICloneable { public double? Quality { get => HeaderUtilities.GetQuality((UnvalidatedObjectCollection<NameValueHeaderValue>)Parameters); set => HeaderUtilities.SetQuality((UnvalidatedObjectCollection<NameValueHeaderValue>)Parameters, value); } internal TransferCodingWithQualityHeaderValue() { // Used by the parser to create a new instance of this type. } public TransferCodingWithQualityHeaderValue(string value) : base(value) { } public TransferCodingWithQualityHeaderValue(string value, double quality) : base(value) { Quality = quality; } private TransferCodingWithQualityHeaderValue(TransferCodingWithQualityHeaderValue source) : base(source) { // No additional members to initialize here. This constructor is used by Clone(). } object ICloneable.Clone() { return new TransferCodingWithQualityHeaderValue(this); } public static new TransferCodingWithQualityHeaderValue Parse(string? input) { int index = 0; return (TransferCodingWithQualityHeaderValue)TransferCodingHeaderParser.SingleValueWithQualityParser .ParseValue(input, null, ref index); } public static bool TryParse([NotNullWhen(true)] string? input, [NotNullWhen(true)] out TransferCodingWithQualityHeaderValue? parsedValue) { int index = 0; parsedValue = null; if (TransferCodingHeaderParser.SingleValueWithQualityParser.TryParseValue( input, null, ref index, out object? output)) { parsedValue = (TransferCodingWithQualityHeaderValue)output!; return true; } return false; } } }
-1
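As a short usage sketch for the header type in this record (all calls below are the public System.Net.Http.Headers API shown in the blob):

```csharp
using System;
using System.Net.Http.Headers;

class TransferCodingUsage
{
    static void Main()
    {
        // Parse throws on malformed input; TryParse reports failure instead.
        var gzip = TransferCodingWithQualityHeaderValue.Parse("gzip; q=0.8");
        Console.WriteLine($"{gzip.Value} q={gzip.Quality}");     // gzip q=0.8

        if (TransferCodingWithQualityHeaderValue.TryParse("chunked", out var chunked))
        {
            Console.WriteLine(chunked.Quality is null);          // True: no q parameter
            chunked.Quality = 0.5;                               // stored as a q name/value parameter
            Console.WriteLine(chunked);                          // chunked; q=0.5
        }
    }
}
```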
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/libraries/System.Collections.Immutable/src/System/Collections/Immutable/DictionaryEnumerator.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; namespace System.Collections.Immutable { internal sealed class DictionaryEnumerator<TKey, TValue> : IDictionaryEnumerator where TKey : notnull { private readonly IEnumerator<KeyValuePair<TKey, TValue>> _inner; internal DictionaryEnumerator(IEnumerator<KeyValuePair<TKey, TValue>> inner) { Requires.NotNull(inner, nameof(inner)); _inner = inner; } public DictionaryEntry Entry { get { return new DictionaryEntry(_inner.Current.Key, _inner.Current.Value); } } public object Key { get { return _inner.Current.Key; } } public object? Value { get { return _inner.Current.Value; } } public object Current { get { return this.Entry; } } public bool MoveNext() { return _inner.MoveNext(); } public void Reset() { _inner.Reset(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; namespace System.Collections.Immutable { internal sealed class DictionaryEnumerator<TKey, TValue> : IDictionaryEnumerator where TKey : notnull { private readonly IEnumerator<KeyValuePair<TKey, TValue>> _inner; internal DictionaryEnumerator(IEnumerator<KeyValuePair<TKey, TValue>> inner) { Requires.NotNull(inner, nameof(inner)); _inner = inner; } public DictionaryEntry Entry { get { return new DictionaryEntry(_inner.Current.Key, _inner.Current.Value); } } public object Key { get { return _inner.Current.Key; } } public object? Value { get { return _inner.Current.Value; } } public object Current { get { return this.Entry; } } public bool MoveNext() { return _inner.MoveNext(); } public void Reset() { _inner.Reset(); } } }
-1
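The adapter in this record exists so that a generic KeyValuePair enumerator can satisfy the non-generic IDictionaryEnumerator contract (Key/Value/Entry). The sketch below shows that view in action through ImmutableDictionary's non-generic IDictionary surface, which is the kind of call path such an adapter backs (requires the System.Collections.Immutable package):

```csharp
using System.Collections;
using System.Collections.Immutable;

class DictionaryEnumeratorDemo
{
    static void Main()
    {
        // The non-generic IDictionary view hands out an IDictionaryEnumerator.
        IDictionary map = ImmutableDictionary<string, int>.Empty.Add("a", 1).Add("b", 2);
        IDictionaryEnumerator e = map.GetEnumerator();
        while (e.MoveNext())
            System.Console.WriteLine($"{e.Key} = {e.Value}");
    }
}
```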
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/libraries/System.Private.CoreLib/src/System/Random.ImplBase.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System { public partial class Random { /// <summary>Base type for all generator implementations that plug into the base Random.</summary> internal abstract class ImplBase { public abstract double Sample(); public abstract int Next(); public abstract int Next(int maxValue); public abstract int Next(int minValue, int maxValue); public abstract long NextInt64(); public abstract long NextInt64(long maxValue); public abstract long NextInt64(long minValue, long maxValue); public abstract float NextSingle(); public abstract double NextDouble(); public abstract void NextBytes(byte[] buffer); public abstract void NextBytes(Span<byte> buffer); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System { public partial class Random { /// <summary>Base type for all generator implementations that plug into the base Random.</summary> internal abstract class ImplBase { public abstract double Sample(); public abstract int Next(); public abstract int Next(int maxValue); public abstract int Next(int minValue, int maxValue); public abstract long NextInt64(); public abstract long NextInt64(long maxValue); public abstract long NextInt64(long minValue, long maxValue); public abstract float NextSingle(); public abstract double NextDouble(); public abstract void NextBytes(byte[] buffer); public abstract void NextBytes(Span<byte> buffer); } } }
-1
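ImplBase defines the contract that Random's pluggable generators implement. Since the type is internal, the sketch below restates two of its members under an invented name and plugs in a deliberately trivial deterministic generator, just to show the shape of the plug-in pattern; the runtime's real implementations are substantially more involved.

```csharp
using System;

// Invented stand-ins for illustration; only the member signatures mirror ImplBase.
abstract class ImplBaseSketch
{
    public abstract double Sample();
    public abstract int Next(int minValue, int maxValue);
}

sealed class CountingImpl : ImplBaseSketch
{
    private int _state;

    // Deterministic counter mapped into [0, 1); a stand-in for a real PRNG step.
    public override double Sample() =>
        (_state = (_state + 1) & 0x3FFFFFFF) * (1.0 / (1 << 30));

    public override int Next(int minValue, int maxValue) =>
        minValue + (int)(Sample() * ((long)maxValue - minValue));
}

class ImplDemo
{
    static void Main() => Console.WriteLine(new CountingImpl().Next(0, 10));
}
```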
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/tests/JIT/Methodical/eh/deadcode/badcodeinsidefinally.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern mscorlib{ .ver 0:0:0:0 } .assembly extern eh_common{} .assembly 'badcodeinsidefinally' { .ver 0:0:0:0 } .assembly extern xunit.core {} .class public auto ansi beforefieldinit Test_badcodeinsidefinally extends [mscorlib]System.Object { .method static object GetObj() cil managed { .locals init (string str) try_l0_begin: try_l1_begin: ldstr "pass" stloc.0 leave done try_l1_end: finally_l1_begin: try_l2_begin: ldnull throw try_l2_end: catch_l2_begin: throw catch_l2_end: pop endfinally finally_l1_end: try_l0_end: catch_l0_begin: pop leave catch_l0_end catch_l0_end: done: ldloc.0 ret .try try_l2_begin to try_l2_end catch [mscorlib]System.Exception handler catch_l2_begin to catch_l2_end .try try_l1_begin to try_l1_end finally handler finally_l1_begin to finally_l1_end .try try_l0_begin to try_l0_end catch [mscorlib]System.Exception handler catch_l0_begin to catch_l0_end } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .locals init (object V_0, class [mscorlib]System.IO.StringWriter expectedOut, class [eh_common]TestUtil.TestLog testLog ) newobj instance void [mscorlib]System.IO.StringWriter::.ctor() stloc.s expectedOut ldloc.s expectedOut ldstr "pass" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut newobj instance void [eh_common]TestUtil.TestLog::.ctor(object) stloc.s testLog ldloc.s testLog callvirt instance void [eh_common]TestUtil.TestLog::StartRecording() call object Test_badcodeinsidefinally::GetObj() call void [System.Console]System.Console::WriteLine(object) ldloc.s testLog callvirt instance void [eh_common]TestUtil.TestLog::StopRecording() ldloc.s testLog callvirt instance int32 [eh_common]TestUtil.TestLog::VerifyOutput() ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern mscorlib{ .ver 0:0:0:0 } .assembly extern eh_common{} .assembly 'badcodeinsidefinally' { .ver 0:0:0:0 } .assembly extern xunit.core {} .class public auto ansi beforefieldinit Test_badcodeinsidefinally extends [mscorlib]System.Object { .method static object GetObj() cil managed { .locals init (string str) try_l0_begin: try_l1_begin: ldstr "pass" stloc.0 leave done try_l1_end: finally_l1_begin: try_l2_begin: ldnull throw try_l2_end: catch_l2_begin: throw catch_l2_end: pop endfinally finally_l1_end: try_l0_end: catch_l0_begin: pop leave catch_l0_end catch_l0_end: done: ldloc.0 ret .try try_l2_begin to try_l2_end catch [mscorlib]System.Exception handler catch_l2_begin to catch_l2_end .try try_l1_begin to try_l1_end finally handler finally_l1_begin to finally_l1_end .try try_l0_begin to try_l0_end catch [mscorlib]System.Exception handler catch_l0_begin to catch_l0_end } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .locals init (object V_0, class [mscorlib]System.IO.StringWriter expectedOut, class [eh_common]TestUtil.TestLog testLog ) newobj instance void [mscorlib]System.IO.StringWriter::.ctor() stloc.s expectedOut ldloc.s expectedOut ldstr "pass" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut newobj instance void [eh_common]TestUtil.TestLog::.ctor(object) stloc.s testLog ldloc.s testLog callvirt instance void [eh_common]TestUtil.TestLog::StartRecording() call object Test_badcodeinsidefinally::GetObj() call void [System.Console]System.Console::WriteLine(object) ldloc.s testLog callvirt instance void [eh_common]TestUtil.TestLog::StopRecording() ldloc.s testLog callvirt instance int32 [eh_common]TestUtil.TestLog::VerifyOutput() ret } }
-1
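The IL above deliberately places unreachable code ('pop; endfinally') after a rethrow inside a finally handler, a shape C# cannot express directly. The C# approximation below reproduces the observable control flow rather than the dead code: the finally throws, the inner catch rethrows, the outer catch swallows, and the method still returns "pass".

```csharp
using System;

class BadCodeInsideFinallyApprox
{
    static object GetObj()
    {
        string str = null;
        try
        {
            try
            {
                str = "pass";           // 'ldstr "pass"; stloc.0; leave done'
            }
            finally
            {
                try
                {
                    throw new NullReferenceException();   // 'ldnull; throw'
                }
                catch (Exception)
                {
                    throw;              // rethrow escapes the finally
                }
                // The IL continues with unreachable 'pop; endfinally' here.
            }
        }
        catch (Exception)
        {
            // Outer handler swallows the exception, like catch_l0 in the IL.
        }
        return str;
    }

    static void Main() => Console.WriteLine(GetObj());   // prints "pass"
}
```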
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/libraries/System.Private.CoreLib/src/System/Resources/ResourceSet.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; using System.Reflection; namespace System.Resources { // A ResourceSet stores all the resources defined in one particular CultureInfo. // // The method used to load resources is straightforward - this class // enumerates over an IResourceReader, loading every name and value, and // stores them in a hash table. Custom IResourceReaders can be used. // public class ResourceSet : IDisposable, IEnumerable { protected IResourceReader Reader = null!; private Dictionary<object, object?>? _table; private Dictionary<string, object?>? _caseInsensitiveTable; // For case-insensitive lookups. protected ResourceSet() { // To not inconvenience people subclassing us, we should allocate a new // hashtable here just so that Table is set to something. _table = new Dictionary<object, object?>(); } // For RuntimeResourceSet, ignore the Table parameter - it's a wasted // allocation. internal ResourceSet(bool _) { } // Creates a ResourceSet using the system default ResourceReader // implementation. Use this constructor to open & read from a file // on disk. // public ResourceSet(string fileName) : this() { Reader = new ResourceReader(fileName); ReadResources(); } // Creates a ResourceSet using the system default ResourceReader // implementation. Use this constructor to read from an open stream // of data. // public ResourceSet(Stream stream) : this() { Reader = new ResourceReader(stream); ReadResources(); } public ResourceSet(IResourceReader reader!!) : this() { Reader = reader; ReadResources(); } // Closes and releases any resources used by this ResourceSet, if any. // All calls to methods on the ResourceSet after a call to close may // fail. Close is guaranteed to be safely callable multiple times on a // particular ResourceSet, and all subclasses must support these semantics. public virtual void Close() { Dispose(true); } protected virtual void Dispose(bool disposing) { if (disposing) { // Close the Reader in a thread-safe way. IResourceReader? copyOfReader = Reader; Reader = null!; if (copyOfReader != null) copyOfReader.Close(); } Reader = null!; _caseInsensitiveTable = null; _table = null; } public void Dispose() { Dispose(true); } // Returns the preferred IResourceReader class for this kind of ResourceSet. // Subclasses of ResourceSet using their own Readers &; should override // GetDefaultReader and GetDefaultWriter. public virtual Type GetDefaultReader() { return typeof(ResourceReader); } // Returns the preferred IResourceWriter class for this kind of ResourceSet. // Subclasses of ResourceSet using their own Readers &; should override // GetDefaultReader and GetDefaultWriter. public virtual Type GetDefaultWriter() { return Type.GetType("System.Resources.ResourceWriter, System.Resources.Writer", throwOnError: true)!; } public virtual IDictionaryEnumerator GetEnumerator() { return GetEnumeratorHelper(); } IEnumerator IEnumerable.GetEnumerator() { return GetEnumeratorHelper(); } private IDictionaryEnumerator GetEnumeratorHelper() { IDictionary? copyOfTableAsIDictionary = _table; // Avoid a race with Dispose if (copyOfTableAsIDictionary == null) throw new ObjectDisposedException(null, SR.ObjectDisposed_ResourceSet); // Use IDictionary.GetEnumerator() for backward compatibility. 
Callers expect the enumerator to return DictionaryEntry instances. return copyOfTableAsIDictionary.GetEnumerator(); } // Look up a string value for a resource given its name. // public virtual string? GetString(string name) { object? obj = GetObjectInternal(name); if (obj is string s) return s; if (obj is null) return null; throw new InvalidOperationException(SR.Format(SR.InvalidOperation_ResourceNotString_Name, name)); } public virtual string? GetString(string name, bool ignoreCase) { // Case-sensitive lookup object? obj = GetObjectInternal(name); if (obj is string s) return s; if (obj is not null) throw new InvalidOperationException(SR.Format(SR.InvalidOperation_ResourceNotString_Name, name)); if (!ignoreCase) return null; // Try doing a case-insensitive lookup obj = GetCaseInsensitiveObjectInternal(name); if (obj is string si) return si; if (obj is null) return null; throw new InvalidOperationException(SR.Format(SR.InvalidOperation_ResourceNotString_Name, name)); } // Look up an object value for a resource given its name. // public virtual object? GetObject(string name) { return GetObjectInternal(name); } public virtual object? GetObject(string name, bool ignoreCase) { object? obj = GetObjectInternal(name); if (obj != null || !ignoreCase) return obj; return GetCaseInsensitiveObjectInternal(name); } protected virtual void ReadResources() { Debug.Assert(_table != null); Debug.Assert(Reader != null); IDictionaryEnumerator en = Reader.GetEnumerator(); while (en.MoveNext()) { _table.Add(en.Key, en.Value); } // While technically possible to close the Reader here, don't close it // to help with some WinRes lifetime issues. } private object? GetObjectInternal(string name!!) { Dictionary<object, object?>? copyOfTable = _table; // Avoid a race with Dispose if (copyOfTable == null) throw new ObjectDisposedException(null, SR.ObjectDisposed_ResourceSet); copyOfTable.TryGetValue(name, out object? value); return value; } private object? GetCaseInsensitiveObjectInternal(string name) { Dictionary<object, object?>? copyOfTable = _table; // Avoid a race with Dispose if (copyOfTable == null) throw new ObjectDisposedException(null, SR.ObjectDisposed_ResourceSet); Dictionary<string, object?>? caseTable = _caseInsensitiveTable; // Avoid a race condition with Close if (caseTable == null) { caseTable = new Dictionary<string, object?>(copyOfTable.Count, StringComparer.OrdinalIgnoreCase); foreach (var item in copyOfTable) { if (item.Key is not string s) continue; caseTable.Add(s, item.Value); } _caseInsensitiveTable = caseTable; } caseTable.TryGetValue(name, out object? value); return value; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; using System.Reflection; namespace System.Resources { // A ResourceSet stores all the resources defined in one particular CultureInfo. // // The method used to load resources is straightforward - this class // enumerates over an IResourceReader, loading every name and value, and // stores them in a hash table. Custom IResourceReaders can be used. // public class ResourceSet : IDisposable, IEnumerable { protected IResourceReader Reader = null!; private Dictionary<object, object?>? _table; private Dictionary<string, object?>? _caseInsensitiveTable; // For case-insensitive lookups. protected ResourceSet() { // To not inconvenience people subclassing us, we should allocate a new // hashtable here just so that Table is set to something. _table = new Dictionary<object, object?>(); } // For RuntimeResourceSet, ignore the Table parameter - it's a wasted // allocation. internal ResourceSet(bool _) { } // Creates a ResourceSet using the system default ResourceReader // implementation. Use this constructor to open & read from a file // on disk. // public ResourceSet(string fileName) : this() { Reader = new ResourceReader(fileName); ReadResources(); } // Creates a ResourceSet using the system default ResourceReader // implementation. Use this constructor to read from an open stream // of data. // public ResourceSet(Stream stream) : this() { Reader = new ResourceReader(stream); ReadResources(); } public ResourceSet(IResourceReader reader!!) : this() { Reader = reader; ReadResources(); } // Closes and releases any resources used by this ResourceSet, if any. // All calls to methods on the ResourceSet after a call to close may // fail. Close is guaranteed to be safely callable multiple times on a // particular ResourceSet, and all subclasses must support these semantics. public virtual void Close() { Dispose(true); } protected virtual void Dispose(bool disposing) { if (disposing) { // Close the Reader in a thread-safe way. IResourceReader? copyOfReader = Reader; Reader = null!; if (copyOfReader != null) copyOfReader.Close(); } Reader = null!; _caseInsensitiveTable = null; _table = null; } public void Dispose() { Dispose(true); } // Returns the preferred IResourceReader class for this kind of ResourceSet. // Subclasses of ResourceSet using their own Readers &; should override // GetDefaultReader and GetDefaultWriter. public virtual Type GetDefaultReader() { return typeof(ResourceReader); } // Returns the preferred IResourceWriter class for this kind of ResourceSet. // Subclasses of ResourceSet using their own Readers &; should override // GetDefaultReader and GetDefaultWriter. public virtual Type GetDefaultWriter() { return Type.GetType("System.Resources.ResourceWriter, System.Resources.Writer", throwOnError: true)!; } public virtual IDictionaryEnumerator GetEnumerator() { return GetEnumeratorHelper(); } IEnumerator IEnumerable.GetEnumerator() { return GetEnumeratorHelper(); } private IDictionaryEnumerator GetEnumeratorHelper() { IDictionary? copyOfTableAsIDictionary = _table; // Avoid a race with Dispose if (copyOfTableAsIDictionary == null) throw new ObjectDisposedException(null, SR.ObjectDisposed_ResourceSet); // Use IDictionary.GetEnumerator() for backward compatibility. 
Callers expect the enumerator to return DictionaryEntry instances. return copyOfTableAsIDictionary.GetEnumerator(); } // Look up a string value for a resource given its name. // public virtual string? GetString(string name) { object? obj = GetObjectInternal(name); if (obj is string s) return s; if (obj is null) return null; throw new InvalidOperationException(SR.Format(SR.InvalidOperation_ResourceNotString_Name, name)); } public virtual string? GetString(string name, bool ignoreCase) { // Case-sensitive lookup object? obj = GetObjectInternal(name); if (obj is string s) return s; if (obj is not null) throw new InvalidOperationException(SR.Format(SR.InvalidOperation_ResourceNotString_Name, name)); if (!ignoreCase) return null; // Try doing a case-insensitive lookup obj = GetCaseInsensitiveObjectInternal(name); if (obj is string si) return si; if (obj is null) return null; throw new InvalidOperationException(SR.Format(SR.InvalidOperation_ResourceNotString_Name, name)); } // Look up an object value for a resource given its name. // public virtual object? GetObject(string name) { return GetObjectInternal(name); } public virtual object? GetObject(string name, bool ignoreCase) { object? obj = GetObjectInternal(name); if (obj != null || !ignoreCase) return obj; return GetCaseInsensitiveObjectInternal(name); } protected virtual void ReadResources() { Debug.Assert(_table != null); Debug.Assert(Reader != null); IDictionaryEnumerator en = Reader.GetEnumerator(); while (en.MoveNext()) { _table.Add(en.Key, en.Value); } // While technically possible to close the Reader here, don't close it // to help with some WinRes lifetime issues. } private object? GetObjectInternal(string name!!) { Dictionary<object, object?>? copyOfTable = _table; // Avoid a race with Dispose if (copyOfTable == null) throw new ObjectDisposedException(null, SR.ObjectDisposed_ResourceSet); copyOfTable.TryGetValue(name, out object? value); return value; } private object? GetCaseInsensitiveObjectInternal(string name) { Dictionary<object, object?>? copyOfTable = _table; // Avoid a race with Dispose if (copyOfTable == null) throw new ObjectDisposedException(null, SR.ObjectDisposed_ResourceSet); Dictionary<string, object?>? caseTable = _caseInsensitiveTable; // Avoid a race condition with Close if (caseTable == null) { caseTable = new Dictionary<string, object?>(copyOfTable.Count, StringComparer.OrdinalIgnoreCase); foreach (var item in copyOfTable) { if (item.Key is not string s) continue; caseTable.Add(s, item.Value); } _caseInsensitiveTable = caseTable; } caseTable.TryGetValue(name, out object? value); return value; } } }
-1
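A brief usage sketch for this record's type; everything below is the public ResourceSet surface shown in the blob, and "strings.resources" is a placeholder path to a compiled .resources file.

```csharp
using System;
using System.Collections;
using System.Resources;

class ResourceSetUsage
{
    static void Main()
    {
        using var set = new ResourceSet("strings.resources");   // placeholder file

        // Case-sensitive lookup first; with ignoreCase: true it falls back to
        // the lazily built case-insensitive table.
        string? greeting = set.GetString("Greeting", ignoreCase: true);
        Console.WriteLine(greeting ?? "<missing>");

        // Enumeration yields DictionaryEntry values, as the comment in the
        // source promises for backward compatibility.
        foreach (DictionaryEntry entry in set)
            Console.WriteLine($"{entry.Key} = {entry.Value}");
    }
}
```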
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/ldc_c_initblk.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } //---- CLASS ---------------- .class _initblk { //---- GLOBAL DATA ---------- .field public static int32 DATA //---- METHODS -------------- //---- CONSTRUCTOR ---------- .method public void _initblk() { .maxstack 0 ret } //---- MAIN ----------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 //====== begin testing ====== // -- init a 4 byte block of memory to 0xAAAAAAAA ldsflda int32 _initblk::DATA ldc.i4 0xAA ldc.i4 4 initblk // -- load the 4 bytes of memory _and be sure it is 0xAAAAAAAA ldsflda int32 _initblk::DATA ldind.i4 ldc.i4 0xAAAAAAAA ceq brfalse FAIL //====== end testing ======== //---- branch here on pass -- PASS: ldc.i4 100 br END //---- branch here on fail -- FAIL: ldc.i4 101 //---- return the result ---- END: ret //---- END OF METHOD -------- } //---- EOF ------------------ } .assembly ldc_c_initblk {}
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern legacy library mscorlib {} .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } //---- CLASS ---------------- .class _initblk { //---- GLOBAL DATA ---------- .field public static int32 DATA //---- METHODS -------------- //---- CONSTRUCTOR ---------- .method public void _initblk() { .maxstack 0 ret } //---- MAIN ----------------- .method public static int32 main(class [mscorlib]System.String[]) { .entrypoint .maxstack 5 //====== begin testing ====== // -- init a 4 byte block of memory to 0xAAAAAAAA ldsflda int32 _initblk::DATA ldc.i4 0xAA ldc.i4 4 initblk // -- load the 4 bytes of memory _and be sure it is 0xAAAAAAAA ldsflda int32 _initblk::DATA ldind.i4 ldc.i4 0xAAAAAAAA ceq brfalse FAIL //====== end testing ======== //---- branch here on pass -- PASS: ldc.i4 100 br END //---- branch here on fail -- FAIL: ldc.i4 101 //---- return the result ---- END: ret //---- END OF METHOD -------- } //---- EOF ------------------ } .assembly ldc_c_initblk {}
-1
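The IL test drives 'initblk' directly: it fills four bytes at a static field's address with 0xAA and checks that reading them back as an int32 yields 0xAAAAAAAA (result 100 on pass, 101 on fail). A C# analogue of the same memory operation, assuming unsafe code is enabled, is:

```csharp
using System;
using System.Runtime.CompilerServices;

class InitBlkSketch
{
    // Requires <AllowUnsafeBlocks>true</AllowUnsafeBlocks> in the project file.
    static unsafe void Main()
    {
        int data = 0;                       // local stand-in for the static DATA field
        Unsafe.InitBlock(&data, 0xAA, 4);   // 'ldc.i4 0xAA; ldc.i4 4; initblk'
        Console.WriteLine(data == unchecked((int)0xAAAAAAAA) ? 100 : 101);   // 100 = pass
    }
}
```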
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail calls when the callee uses a split struct argument - Do not use fast tail calls when the callee uses a non-standard calling convention - Do not use fast tail calls when the call overwrites stack space that will be passed to the callee.
./src/native/eventpipe/ep-stream.c
#include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) #define EP_IMPL_STREAM_GETTER_SETTER #define EP_IMPL_IPC_STREAM_GETTER_SETTER #include "ep-stream.h" #include "ep-ipc-stream.h" #include "ep-rt.h" /* * Forward declares of all static functions. */ static void file_stream_writer_free_func (void *stream); static bool file_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); static void file_write_end (EventPipeFile *file); static void ipc_stream_writer_free_func (void *stream); static bool ipc_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); static void fast_serializer_write_serialization_type ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_ojbect); /* * FastSerializableObject. */ FastSerializableObject * ep_fast_serializable_object_init ( FastSerializableObject *fast_serializable_object, FastSerializableObjectVtable *vtable, int32_t object_version, int32_t min_reader_version, bool is_private) { EP_ASSERT (fast_serializable_object != NULL); EP_ASSERT (vtable != NULL); fast_serializable_object->vtable = vtable; fast_serializable_object->object_version = object_version; fast_serializable_object->min_reader_version = min_reader_version; fast_serializable_object->is_private = is_private; return fast_serializable_object; } void ep_fast_serializable_object_fini (FastSerializableObject *fast_serializable_ojbect) { ; } void ep_fast_serializable_object_free_vcall (FastSerializableObject *fast_serializable_ojbect) { ep_return_void_if_nok (fast_serializable_ojbect != NULL); EP_ASSERT (fast_serializable_ojbect->vtable != NULL); FastSerializableObjectVtable *vtable = fast_serializable_ojbect->vtable; EP_ASSERT (vtable->free_func != NULL); vtable->free_func (fast_serializable_ojbect); } void ep_fast_serializable_object_fast_serialize_vcall ( FastSerializableObject *fast_serializable_ojbect, FastSerializer *fast_serializer) { EP_ASSERT (fast_serializable_ojbect != NULL); EP_ASSERT (fast_serializable_ojbect->vtable != NULL); FastSerializableObjectVtable *vtable = fast_serializable_ojbect->vtable; EP_ASSERT (vtable->fast_serialize_func != NULL); vtable->fast_serialize_func (fast_serializable_ojbect, fast_serializer); } const ep_char8_t * ep_fast_serializable_object_get_type_name_vcall (FastSerializableObject *fast_serializable_ojbect) { EP_ASSERT (fast_serializable_ojbect != NULL); EP_ASSERT (fast_serializable_ojbect->vtable != NULL); FastSerializableObjectVtable *vtable = fast_serializable_ojbect->vtable; EP_ASSERT (vtable->get_type_name_func != NULL); return vtable->get_type_name_func (fast_serializable_ojbect); } void ep_fast_serializable_object_fast_serialize ( FastSerializableObject *fast_serializable_ojbect, FastSerializer *fast_serializer) { ep_fast_serializable_object_fast_serialize_vcall (fast_serializable_ojbect, fast_serializer); } const ep_char8_t * ep_fast_serializable_object_get_type_name (FastSerializableObject *fast_serializable_ojbect) { return ep_fast_serializable_object_get_type_name_vcall (fast_serializable_ojbect); } /* * FastSerializer. */ static void fast_serializer_write_serialization_type ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_ojbect) { EP_ASSERT (fast_serializable_ojbect != NULL); // Write the BeginObject tag. ep_fast_serializer_write_tag (fast_serializer, fast_serializable_ojbect->is_private ? 
FAST_SERIALIZER_TAGS_BEGIN_PRIVATE_OBJECT : FAST_SERIALIZER_TAGS_BEGIN_OBJECT, NULL, 0); // Write a NullReferenceTag, which implies that the following fields belong to SerializationType. ep_fast_serializer_write_tag (fast_serializer, FAST_SERIALIZER_TAGS_NULL_REFERENCE, NULL, 0); // Write the SerializationType version fields. int32_t serialization_type [2]; serialization_type [0] = fast_serializable_ojbect->object_version; serialization_type [1] = fast_serializable_ojbect->min_reader_version; ep_fast_serializer_write_buffer (fast_serializer, (const uint8_t *)serialization_type, sizeof (serialization_type)); // Write the SerializationType TypeName field. const ep_char8_t *type_name = ep_fast_serializable_object_get_type_name_vcall (fast_serializable_ojbect); if (type_name) ep_fast_serializer_write_string (fast_serializer, type_name, (uint32_t)strlen (type_name)); // Write the EndObject tag. ep_fast_serializer_write_tag (fast_serializer, FAST_SERIALIZER_TAGS_END_OBJECT, NULL, 0); } FastSerializer * ep_fast_serializer_alloc (StreamWriter *stream_writer) { EP_ASSERT (stream_writer != NULL); const ep_char8_t signature[] = "!FastSerialization.1"; // the consumer lib expects exactly the same string, it must not be changed uint32_t signature_len = (uint32_t)(STRING_LENGTH (signature)); FastSerializer *instance = ep_rt_object_alloc (FastSerializer); ep_raise_error_if_nok (instance != NULL); // Ownership transferred. instance->stream_writer = stream_writer; instance->required_padding = 0; instance->write_error_encountered = false; ep_fast_serializer_write_string (instance, signature, signature_len); ep_on_exit: return instance; ep_on_error: ep_fast_serializer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_fast_serializer_free (FastSerializer *fast_serializer) { ep_return_void_if_nok (fast_serializer != NULL); EP_ASSERT (fast_serializer->stream_writer != NULL); ep_stream_writer_free_vcall (fast_serializer->stream_writer); ep_rt_object_free (fast_serializer); } void ep_fast_serializer_write_buffer ( FastSerializer *fast_serializer, const uint8_t *buffer, uint32_t buffer_len) { EP_ASSERT (fast_serializer != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (buffer_len > 0); ep_return_void_if_nok (!fast_serializer->write_error_encountered && fast_serializer->stream_writer != NULL); uint32_t bytes_written = 0; bool result = ep_stream_writer_write (fast_serializer->stream_writer, buffer, buffer_len, &bytes_written); uint32_t required_padding = fast_serializer->required_padding; required_padding = (FAST_SERIALIZER_ALIGNMENT_SIZE + required_padding - (bytes_written % FAST_SERIALIZER_ALIGNMENT_SIZE)) % FAST_SERIALIZER_ALIGNMENT_SIZE; fast_serializer->required_padding = required_padding; // This will cause us to stop writing to the file. // The file will still remain open until shutdown so that we don't // have to take a lock at this level when we touch the file stream. fast_serializer->write_error_encountered = ((buffer_len != bytes_written) || !result); } void ep_fast_serializer_write_object ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_ojbect) { EP_ASSERT (fast_serializer != NULL); EP_ASSERT (fast_serializable_ojbect != NULL); ep_fast_serializer_write_tag (fast_serializer, fast_serializable_ojbect->is_private ? FAST_SERIALIZER_TAGS_BEGIN_PRIVATE_OBJECT : FAST_SERIALIZER_TAGS_BEGIN_OBJECT, NULL, 0); fast_serializer_write_serialization_type (fast_serializer, fast_serializable_ojbect); // Ask the object to serialize itself using the current serializer.
ep_fast_serializable_object_fast_serialize_vcall (fast_serializable_ojbect, fast_serializer); ep_fast_serializer_write_tag (fast_serializer, FAST_SERIALIZER_TAGS_END_OBJECT, NULL, 0); } void ep_fast_serializer_write_string ( FastSerializer *fast_serializer, const ep_char8_t *contents, uint32_t contents_len) { // Write the string length. ep_fast_serializer_write_buffer (fast_serializer, (const uint8_t *)&contents_len, sizeof (contents_len)); // Write the string contents. ep_fast_serializer_write_buffer (fast_serializer, (const uint8_t *)contents, contents_len); } void ep_fast_serializer_write_tag ( FastSerializer *fast_serializer, FastSerializerTags tag, const uint8_t *payload, uint32_t payload_len) { uint8_t tag_as_byte = tag; ep_fast_serializer_write_buffer (fast_serializer, &tag_as_byte, sizeof (tag_as_byte)); if (payload != NULL) { EP_ASSERT (payload_len > 0); ep_fast_serializer_write_buffer (fast_serializer, payload, payload_len); } } /* * FileStream. */ FileStream * ep_file_stream_alloc (void) { return ep_rt_object_alloc (FileStream); } void ep_file_stream_free (FileStream *file_stream) { ep_return_void_if_nok (file_stream != NULL); ep_file_stream_close (file_stream); ep_rt_object_free (file_stream); } bool ep_file_stream_open_write ( FileStream *file_stream, const ep_char8_t *path) { EP_ASSERT (file_stream != NULL); ep_rt_file_handle_t rt_file = ep_rt_file_open_write (path); ep_raise_error_if_nok (rt_file != NULL); file_stream->rt_file = rt_file; return true; ep_on_error: return false; } bool ep_file_stream_close (FileStream *file_stream) { ep_return_false_if_nok (file_stream != NULL); bool result = ep_rt_file_close (file_stream->rt_file); file_stream->rt_file = NULL; return result; } bool ep_file_stream_write ( FileStream *file_stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (file_stream != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (bytes_to_write > 0); EP_ASSERT (bytes_written != NULL); return ep_rt_file_write (file_stream->rt_file, buffer, bytes_to_write, bytes_written); } /* * FileStreamWriter.
*/ static void file_stream_writer_free_func (void *stream) { ep_file_stream_writer_free ((FileStreamWriter *)stream); } static bool file_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (stream != NULL); return ep_file_stream_writer_write ( (FileStreamWriter *)stream, buffer, bytes_to_write, bytes_written); } static StreamWriterVtable file_stream_writer_vtable = { file_stream_writer_free_func, file_stream_writer_write_func }; FileStreamWriter * ep_file_stream_writer_alloc (const ep_char8_t *output_file_path) { EP_ASSERT (output_file_path != NULL); FileStreamWriter *instance = ep_rt_object_alloc (FileStreamWriter); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_stream_writer_init ( &instance->stream_writer, &file_stream_writer_vtable) != NULL); instance->file_stream = ep_file_stream_alloc (); ep_raise_error_if_nok (instance->file_stream != NULL); ep_raise_error_if_nok (ep_file_stream_open_write (instance->file_stream, output_file_path)); ep_on_exit: return instance; ep_on_error: ep_file_stream_writer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_file_stream_writer_free (FileStreamWriter *file_stream_writer) { ep_return_void_if_nok (file_stream_writer != NULL); ep_file_stream_free (file_stream_writer->file_stream); ep_stream_writer_fini (&file_stream_writer->stream_writer); ep_rt_object_free (file_stream_writer); } bool ep_file_stream_writer_write ( FileStreamWriter *file_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (file_stream_writer != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (bytes_to_write > 0); EP_ASSERT (bytes_written != NULL); bool result = false; ep_raise_error_if_nok (ep_file_stream_writer_get_file_stream (file_stream_writer) != NULL); result = ep_file_stream_write (ep_file_stream_writer_get_file_stream (file_stream_writer), buffer, bytes_to_write, bytes_written); ep_on_exit: return result; ep_on_error: *bytes_written = 0; ep_exit_error_handler (); } /* * IpcStream. 
*/ IpcStream * ep_ipc_stream_init ( IpcStream *ipc_stream, IpcStreamVtable *vtable) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (vtable != NULL); ipc_stream->vtable = vtable; return ipc_stream; } void ep_ipc_stream_fini (IpcStream *ipc_stream) { return; } void ep_ipc_stream_free_vcall (IpcStream *ipc_stream) { ep_return_void_if_nok (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; ep_ipc_stream_flush_vcall (ipc_stream); ep_ipc_stream_close_vcall (ipc_stream); EP_ASSERT (vtable->free_func != NULL); vtable->free_func (ipc_stream); } bool ep_ipc_stream_read_vcall ( IpcStream *ipc_stream, uint8_t *buffer, uint32_t bytes_to_read, uint32_t *bytes_read, uint32_t timeout_ms) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->read_func != NULL); return vtable->read_func (ipc_stream, buffer, bytes_to_read, bytes_read, timeout_ms); } bool ep_ipc_stream_write_vcall ( IpcStream *ipc_stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written, uint32_t timeout_ms) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->write_func != NULL); return vtable->write_func (ipc_stream, buffer, bytes_to_write, bytes_written, timeout_ms); } bool ep_ipc_stream_flush_vcall (IpcStream *ipc_stream) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->flush_func != NULL); return vtable->flush_func (ipc_stream); } bool ep_ipc_stream_close_vcall (IpcStream *ipc_stream) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->close_func != NULL); return vtable->close_func (ipc_stream); } /* * IpcStreamWriter. */ static void ipc_stream_writer_free_func (void *stream) { ep_ipc_stream_writer_free ((IpcStreamWriter *)stream); } static bool ipc_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (stream != NULL); return ep_ipc_stream_writer_write ( (IpcStreamWriter *)stream, buffer, bytes_to_write, bytes_written); } static StreamWriterVtable ipc_stream_writer_vtable = { ipc_stream_writer_free_func, ipc_stream_writer_write_func }; IpcStreamWriter * ep_ipc_stream_writer_alloc ( uint64_t id, IpcStream *stream) { IpcStreamWriter *instance = ep_rt_object_alloc (IpcStreamWriter); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_stream_writer_init ( &instance->stream_writer, &ipc_stream_writer_vtable) != NULL); // Ownership transferred.
instance->ipc_stream = stream; ep_on_exit: return instance; ep_on_error: ep_ipc_stream_writer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_ipc_stream_writer_free (IpcStreamWriter *ipc_stream_writer) { ep_return_void_if_nok (ipc_stream_writer != NULL); ep_ipc_stream_free_vcall (ipc_stream_writer->ipc_stream); ep_stream_writer_fini (&ipc_stream_writer->stream_writer); ep_rt_object_free (ipc_stream_writer); } bool ep_ipc_stream_writer_write ( IpcStreamWriter *ipc_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (ipc_stream_writer != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (bytes_to_write > 0); EP_ASSERT (bytes_written != NULL); ep_return_false_if_nok (buffer != NULL && bytes_to_write != 0); bool result = false; ep_raise_error_if_nok (ep_ipc_stream_writer_get_ipc_stream (ipc_stream_writer) != NULL); result = ep_ipc_stream_write_vcall (ep_ipc_stream_writer_get_ipc_stream (ipc_stream_writer), buffer, bytes_to_write, bytes_written, EP_INFINITE_WAIT); ep_on_exit: return result; ep_on_error: *bytes_written = 0; ep_exit_error_handler (); } /* * StreamWriter. */ StreamWriter * ep_stream_writer_init ( StreamWriter *stream_writer, StreamWriterVtable *vtable) { EP_ASSERT (stream_writer != NULL); EP_ASSERT (vtable != NULL); stream_writer->vtable = vtable; return stream_writer; } void ep_stream_writer_fini (StreamWriter *stream_writer) { ; } void ep_stream_writer_free_vcall (StreamWriter *stream_writer) { ep_return_void_if_nok (stream_writer != NULL); EP_ASSERT (stream_writer->vtable != NULL); StreamWriterVtable *vtable = stream_writer->vtable; EP_ASSERT (vtable->free_func != NULL); vtable->free_func (stream_writer); } bool ep_stream_writer_write_vcall ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (stream_writer != NULL); EP_ASSERT (stream_writer->vtable != NULL); StreamWriterVtable *vtable = stream_writer->vtable; EP_ASSERT (vtable->write_func != NULL); return vtable->write_func (stream_writer, buffer, bytes_to_write, bytes_written); } bool ep_stream_writer_write ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written) { return ep_stream_writer_write_vcall ( stream_writer, buffer, bytes_to_write, bytes_written); } #endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef EP_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_eventpipe_stream; const char quiet_linker_empty_file_warning_eventpipe_stream = 0; #endif
#include "ep-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) #define EP_IMPL_STREAM_GETTER_SETTER #define EP_IMPL_IPC_STREAM_GETTER_SETTER #include "ep-stream.h" #include "ep-ipc-stream.h" #include "ep-rt.h" /* * Forward declares of all static functions. */ static void file_stream_writer_free_func (void *stream); static bool file_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); static void file_write_end (EventPipeFile *file); static void ipc_stream_writer_free_func (void *stream); static bool ipc_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written); static void fast_serializer_write_serialization_type ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_ojbect); /* * FastSerializableObject. */ FastSerializableObject * ep_fast_serializable_object_init ( FastSerializableObject *fast_serializable_object, FastSerializableObjectVtable *vtable, int32_t object_version, int32_t min_reader_version, bool is_private) { EP_ASSERT (fast_serializable_object != NULL); EP_ASSERT (vtable != NULL); fast_serializable_object->vtable = vtable; fast_serializable_object->object_version = object_version; fast_serializable_object->min_reader_version = min_reader_version; fast_serializable_object->is_private = is_private; return fast_serializable_object; } void ep_fast_serializable_object_fini (FastSerializableObject *fast_serializable_ojbect) { ; } void ep_fast_serializable_object_free_vcall (FastSerializableObject *fast_serializable_ojbect) { ep_return_void_if_nok (fast_serializable_ojbect != NULL); EP_ASSERT (fast_serializable_ojbect->vtable != NULL); FastSerializableObjectVtable *vtable = fast_serializable_ojbect->vtable; EP_ASSERT (vtable->free_func != NULL); vtable->free_func (fast_serializable_ojbect); } void ep_fast_serializable_object_fast_serialize_vcall ( FastSerializableObject *fast_serializable_ojbect, FastSerializer *fast_serializer) { EP_ASSERT (fast_serializable_ojbect != NULL); EP_ASSERT (fast_serializable_ojbect->vtable != NULL); FastSerializableObjectVtable *vtable = fast_serializable_ojbect->vtable; EP_ASSERT (vtable->fast_serialize_func != NULL); vtable->fast_serialize_func (fast_serializable_ojbect, fast_serializer); } const ep_char8_t * ep_fast_serializable_object_get_type_name_vcall (FastSerializableObject *fast_serializable_ojbect) { EP_ASSERT (fast_serializable_ojbect != NULL); EP_ASSERT (fast_serializable_ojbect->vtable != NULL); FastSerializableObjectVtable *vtable = fast_serializable_ojbect->vtable; EP_ASSERT (vtable->get_type_name_func != NULL); return vtable->get_type_name_func (fast_serializable_ojbect); } void ep_fast_serializable_object_fast_serialize ( FastSerializableObject *fast_serializable_ojbect, FastSerializer *fast_serializer) { ep_fast_serializable_object_fast_serialize_vcall (fast_serializable_ojbect, fast_serializer); } const ep_char8_t * ep_fast_serializable_object_get_type_name (FastSerializableObject *fast_serializable_ojbect) { return ep_fast_serializable_object_get_type_name_vcall (fast_serializable_ojbect); } /* * FastSerializer. */ static void fast_serializer_write_serialization_type ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_ojbect) { EP_ASSERT (fast_serializable_ojbect != NULL); // Write the BeginObject tag. ep_fast_serializer_write_tag (fast_serializer, fast_serializable_ojbect->is_private ? 
FAST_SERIALIZER_TAGS_BEGIN_PRIVATE_OBJECT : FAST_SERIALIZER_TAGS_BEGIN_OBJECT, NULL, 0); // Write a NullReferenceTag, which implies that the following fields belong to SerializationType. ep_fast_serializer_write_tag (fast_serializer, FAST_SERIALIZER_TAGS_NULL_REFERENCE, NULL, 0); // Write the SerializationType version fields. int32_t serialization_type [2]; serialization_type [0] = fast_serializable_ojbect->object_version; serialization_type [1] = fast_serializable_ojbect->min_reader_version; ep_fast_serializer_write_buffer (fast_serializer, (const uint8_t *)serialization_type, sizeof (serialization_type)); // Write the SerializationType TypeName field. const ep_char8_t *type_name = ep_fast_serializable_object_get_type_name_vcall (fast_serializable_ojbect); if (type_name) ep_fast_serializer_write_string (fast_serializer, type_name, (uint32_t)strlen (type_name)); // Write the EndObject tag. ep_fast_serializer_write_tag (fast_serializer, FAST_SERIALIZER_TAGS_END_OBJECT, NULL, 0); } FastSerializer * ep_fast_serializer_alloc (StreamWriter *stream_writer) { EP_ASSERT (stream_writer != NULL); const ep_char8_t signature[] = "!FastSerialization.1"; // the consumer lib expects exactly the same string, it must not be changed uint32_t signature_len = (uint32_t)(STRING_LENGTH (signature)); FastSerializer *instance = ep_rt_object_alloc (FastSerializer); ep_raise_error_if_nok (instance != NULL); // Ownership transferred. instance->stream_writer = stream_writer; instance->required_padding = 0; instance->write_error_encountered = false; ep_fast_serializer_write_string (instance, signature, signature_len); ep_on_exit: return instance; ep_on_error: ep_fast_serializer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_fast_serializer_free (FastSerializer *fast_serializer) { ep_return_void_if_nok (fast_serializer != NULL); EP_ASSERT (fast_serializer->stream_writer != NULL); ep_stream_writer_free_vcall (fast_serializer->stream_writer); ep_rt_object_free (fast_serializer); } void ep_fast_serializer_write_buffer ( FastSerializer *fast_serializer, const uint8_t *buffer, uint32_t buffer_len) { EP_ASSERT (fast_serializer != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (buffer_len > 0); ep_return_void_if_nok (!fast_serializer->write_error_encountered && fast_serializer->stream_writer != NULL); uint32_t bytes_written = 0; bool result = ep_stream_writer_write (fast_serializer->stream_writer, buffer, buffer_len, &bytes_written); uint32_t required_padding = fast_serializer->required_padding; required_padding = (FAST_SERIALIZER_ALIGNMENT_SIZE + required_padding - (bytes_written % FAST_SERIALIZER_ALIGNMENT_SIZE)) % FAST_SERIALIZER_ALIGNMENT_SIZE; fast_serializer->required_padding = required_padding; // This will cause us to stop writing to the file. // The file will still remain open until shutdown so that we don't // have to take a lock at this level when we touch the file stream. fast_serializer->write_error_encountered = ((buffer_len != bytes_written) || !result); } void ep_fast_serializer_write_object ( FastSerializer *fast_serializer, FastSerializableObject *fast_serializable_ojbect) { EP_ASSERT (fast_serializer != NULL); EP_ASSERT (fast_serializable_ojbect != NULL); ep_fast_serializer_write_tag (fast_serializer, fast_serializable_ojbect->is_private ? FAST_SERIALIZER_TAGS_BEGIN_PRIVATE_OBJECT : FAST_SERIALIZER_TAGS_BEGIN_OBJECT, NULL, 0); fast_serializer_write_serialization_type (fast_serializer, fast_serializable_ojbect); // Ask the object to serialize itself using the current serializer.
ep_fast_serializable_object_fast_serialize_vcall (fast_serializable_ojbect, fast_serializer); ep_fast_serializer_write_tag (fast_serializer, FAST_SERIALIZER_TAGS_END_OBJECT, NULL, 0); } void ep_fast_serializer_write_string ( FastSerializer *fast_serializer, const ep_char8_t *contents, uint32_t contents_len) { // Write the string length. ep_fast_serializer_write_buffer (fast_serializer, (const uint8_t *)&contents_len, sizeof (contents_len)); // Write the string contents. ep_fast_serializer_write_buffer (fast_serializer, (const uint8_t *)contents, contents_len); } void ep_fast_serializer_write_tag ( FastSerializer *fast_serializer, FastSerializerTags tag, const uint8_t *payload, uint32_t payload_len) { uint8_t tag_as_byte = tag; ep_fast_serializer_write_buffer (fast_serializer, &tag_as_byte, sizeof (tag_as_byte)); if (payload != NULL) { EP_ASSERT (payload_len > 0); ep_fast_serializer_write_buffer (fast_serializer, payload, payload_len); } } /* * FileStream. */ FileStream * ep_file_stream_alloc (void) { return ep_rt_object_alloc (FileStream); } void ep_file_stream_free (FileStream *file_stream) { ep_return_void_if_nok (file_stream != NULL); ep_file_stream_close (file_stream); ep_rt_object_free (file_stream); } bool ep_file_stream_open_write ( FileStream *file_stream, const ep_char8_t *path) { EP_ASSERT (file_stream != NULL); ep_rt_file_handle_t rt_file = ep_rt_file_open_write (path); ep_raise_error_if_nok (rt_file != NULL); file_stream->rt_file = rt_file; return true; ep_on_error: return false; } bool ep_file_stream_close (FileStream *file_stream) { ep_return_false_if_nok (file_stream != NULL); bool result = ep_rt_file_close (file_stream->rt_file); file_stream->rt_file = NULL; return result; } bool ep_file_stream_write ( FileStream *file_stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (file_stream != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (bytes_to_write > 0); EP_ASSERT (bytes_written != NULL); return ep_rt_file_write (file_stream->rt_file, buffer, bytes_to_write, bytes_written); } /* * FileStreamWriter.
*/ static void file_stream_writer_free_func (void *stream) { ep_file_stream_writer_free ((FileStreamWriter *)stream); } static bool file_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (stream != NULL); return ep_file_stream_writer_write ( (FileStreamWriter *)stream, buffer, bytes_to_write, bytes_written); } static StreamWriterVtable file_stream_writer_vtable = { file_stream_writer_free_func, file_stream_writer_write_func }; FileStreamWriter * ep_file_stream_writer_alloc (const ep_char8_t *output_file_path) { EP_ASSERT (output_file_path != NULL); FileStreamWriter *instance = ep_rt_object_alloc (FileStreamWriter); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_stream_writer_init ( &instance->stream_writer, &file_stream_writer_vtable) != NULL); instance->file_stream = ep_file_stream_alloc (); ep_raise_error_if_nok (instance->file_stream != NULL); ep_raise_error_if_nok (ep_file_stream_open_write (instance->file_stream, output_file_path)); ep_on_exit: return instance; ep_on_error: ep_file_stream_writer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_file_stream_writer_free (FileStreamWriter *file_stream_writer) { ep_return_void_if_nok (file_stream_writer != NULL); ep_file_stream_free (file_stream_writer->file_stream); ep_stream_writer_fini (&file_stream_writer->stream_writer); ep_rt_object_free (file_stream_writer); } bool ep_file_stream_writer_write ( FileStreamWriter *file_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (file_stream_writer != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (bytes_to_write > 0); EP_ASSERT (bytes_written != NULL); bool result = false; ep_raise_error_if_nok (ep_file_stream_writer_get_file_stream (file_stream_writer) != NULL); result = ep_file_stream_write (ep_file_stream_writer_get_file_stream (file_stream_writer), buffer, bytes_to_write, bytes_written); ep_on_exit: return result; ep_on_error: *bytes_written = 0; ep_exit_error_handler (); } /* * IpcStream. 
*/ IpcStream * ep_ipc_stream_init ( IpcStream *ipc_stream, IpcStreamVtable *vtable) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (vtable != NULL); ipc_stream->vtable = vtable; return ipc_stream; } void ep_ipc_stream_fini (IpcStream *ipc_stream) { return; } void ep_ipc_stream_free_vcall (IpcStream *ipc_stream) { ep_return_void_if_nok (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; ep_ipc_stream_flush_vcall (ipc_stream); ep_ipc_stream_close_vcall (ipc_stream); EP_ASSERT (vtable->free_func != NULL); vtable->free_func (ipc_stream); } bool ep_ipc_stream_read_vcall ( IpcStream *ipc_stream, uint8_t *buffer, uint32_t bytes_to_read, uint32_t *bytes_read, uint32_t timeout_ms) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->read_func != NULL); return vtable->read_func (ipc_stream, buffer, bytes_to_read, bytes_read, timeout_ms); } bool ep_ipc_stream_write_vcall ( IpcStream *ipc_stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written, uint32_t timeout_ms) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->write_func != NULL); return vtable->write_func (ipc_stream, buffer, bytes_to_write, bytes_written, timeout_ms); } bool ep_ipc_stream_flush_vcall (IpcStream *ipc_stream) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->flush_func != NULL); return vtable->flush_func (ipc_stream); } bool ep_ipc_stream_close_vcall (IpcStream *ipc_stream) { EP_ASSERT (ipc_stream != NULL); EP_ASSERT (ipc_stream->vtable != NULL); IpcStreamVtable *vtable = ipc_stream->vtable; EP_ASSERT (vtable->close_func != NULL); return vtable->close_func (ipc_stream); } /* * IpcStreamWriter. */ static void ipc_stream_writer_free_func (void *stream) { ep_ipc_stream_writer_free ((IpcStreamWriter *)stream); } static bool ipc_stream_writer_write_func ( void *stream, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (stream != NULL); return ep_ipc_stream_writer_write ( (IpcStreamWriter *)stream, buffer, bytes_to_write, bytes_written); } static StreamWriterVtable ipc_stream_writer_vtable = { ipc_stream_writer_free_func, ipc_stream_writer_write_func }; IpcStreamWriter * ep_ipc_stream_writer_alloc ( uint64_t id, IpcStream *stream) { IpcStreamWriter *instance = ep_rt_object_alloc (IpcStreamWriter); ep_raise_error_if_nok (instance != NULL); ep_raise_error_if_nok (ep_stream_writer_init ( &instance->stream_writer, &ipc_stream_writer_vtable) != NULL); // Ownership transferred.
instance->ipc_stream = stream; ep_on_exit: return instance; ep_on_error: ep_ipc_stream_writer_free (instance); instance = NULL; ep_exit_error_handler (); } void ep_ipc_stream_writer_free (IpcStreamWriter *ipc_stream_writer) { ep_return_void_if_nok (ipc_stream_writer != NULL); ep_ipc_stream_free_vcall (ipc_stream_writer->ipc_stream); ep_stream_writer_fini (&ipc_stream_writer->stream_writer); ep_rt_object_free (ipc_stream_writer); } bool ep_ipc_stream_writer_write ( IpcStreamWriter *ipc_stream_writer, const uint8_t *buffer, uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (ipc_stream_writer != NULL); EP_ASSERT (buffer != NULL); EP_ASSERT (bytes_to_write > 0); EP_ASSERT (bytes_written != NULL); ep_return_false_if_nok (buffer != NULL && bytes_to_write != 0); bool result = false; ep_raise_error_if_nok (ep_ipc_stream_writer_get_ipc_stream (ipc_stream_writer) != NULL); result = ep_ipc_stream_write_vcall (ep_ipc_stream_writer_get_ipc_stream (ipc_stream_writer), buffer, bytes_to_write, bytes_written, EP_INFINITE_WAIT); ep_on_exit: return result; ep_on_error: *bytes_written = 0; ep_exit_error_handler (); } /* * StreamWriter. */ StreamWriter * ep_stream_writer_init ( StreamWriter *stream_writer, StreamWriterVtable *vtable) { EP_ASSERT (stream_writer != NULL); EP_ASSERT (vtable != NULL); stream_writer->vtable = vtable; return stream_writer; } void ep_stream_writer_fini (StreamWriter *stream_writer) { ; } void ep_stream_writer_free_vcall (StreamWriter *stream_writer) { ep_return_void_if_nok (stream_writer != NULL); EP_ASSERT (stream_writer->vtable != NULL); StreamWriterVtable *vtable = stream_writer->vtable; EP_ASSERT (vtable->free_func != NULL); vtable->free_func (stream_writer); } bool ep_stream_writer_write_vcall ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written) { EP_ASSERT (stream_writer != NULL); EP_ASSERT (stream_writer->vtable != NULL); StreamWriterVtable *vtable = stream_writer->vtable; EP_ASSERT (vtable->write_func != NULL); return vtable->write_func (stream_writer, buffer, bytes_to_write, bytes_written); } bool ep_stream_writer_write ( StreamWriter *stream_writer, const uint8_t *buffer, const uint32_t bytes_to_write, uint32_t *bytes_written) { return ep_stream_writer_write_vcall ( stream_writer, buffer, bytes_to_write, bytes_written); } #endif /* !defined(EP_INCLUDE_SOURCE_FILES) || defined(EP_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef EP_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_eventpipe_stream; const char quiet_linker_empty_file_warning_eventpipe_stream = 0; #endif
-1
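
A note on the wire format implemented above: ep_fast_serializer_write_string emits a 4-byte length and then the raw bytes, and ep_fast_serializer_alloc opens every stream by writing the fixed "!FastSerialization.1" signature through that same path. The following is a minimal C# sketch of the same framing; the class and method names are illustrative, and it assumes a little-endian length, which is how a little-endian host lays out the uint32_t the C code writes directly from memory.

using System.IO;
using System.Text;

static class FastSerializationFraming
{
    // Mirrors ep_fast_serializer_write_string above: a 4-byte length prefix
    // followed by the raw bytes, with no NUL terminator (the C code passes
    // STRING_LENGTH/strlen, which exclude it). BinaryWriter emits the length
    // little-endian.
    public static void WriteLengthPrefixedString(BinaryWriter writer, string value)
    {
        byte[] bytes = Encoding.UTF8.GetBytes(value);
        writer.Write((uint)bytes.Length);
        writer.Write(bytes);
    }
}

// Usage: the stream would open with the signature the consumer lib expects:
// FastSerializationFraming.WriteLengthPrefixedString(writer, "!FastSerialization.1");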
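
The padding bookkeeping in ep_fast_serializer_write_buffer rolls the outstanding alignment debt forward after every write. A small sketch with worked arithmetic, assuming FAST_SERIALIZER_ALIGNMENT_SIZE is 4 (an assumption; the constant's definition is not part of this excerpt):

static class FastSerializerPadding
{
    private const uint Alignment = 4; // assumed; not defined in the excerpt

    // Same update as in ep_fast_serializer_write_buffer: advance the owed
    // padding by however far the latest write moved off the boundary.
    public static uint Next(uint currentPadding, uint bytesWritten) =>
        (Alignment + currentPadding - (bytesWritten % Alignment)) % Alignment;
}

// Worked example: from an aligned position with no padding owed, writing
// 21 bytes ends 1 byte past a 4-byte boundary, so 3 bytes are now owed:
// Next(0, 21) == (4 + 0 - (21 % 4)) % 4 == 3.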
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/X86/Fma.PlatformNotSupported.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; using System.Runtime.Intrinsics; namespace System.Runtime.Intrinsics.X86 { /// <summary> /// This class provides access to Intel FMA hardware instructions via intrinsics /// </summary> [CLSCompliant(false)] public abstract class Fma : Avx { internal Fma() { } public static new bool IsSupported { [Intrinsic] get { return false; } } public new abstract class X64 : Avx.X64 { internal X64() { } public static new bool IsSupported { [Intrinsic] get { return false; } } } /// <summary> /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c) /// VFMADDPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplyAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c) /// VFMADDPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplyAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c) /// VFMADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplyAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c) /// VFMADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplyAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c) /// VFMADDSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplyAddScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c) /// VFMADDSS xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplyAddScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c) /// VFMADDSUBPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplyAddSubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c) /// VFMADDSUBPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplyAddSubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c) /// VFMADDSUBPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplyAddSubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c) /// VFMADDSUBPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplyAddSubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c) /// VFMSUBPS xmm, xmm, xmm/m128 /// </summary> 
public static Vector128<float> MultiplySubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c) /// VFMSUBPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplySubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c) /// VFMSUBPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplySubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c) /// VFMSUBPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplySubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c) /// VFMSUBSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplySubtractScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c) /// VFMSUBSD xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplySubtractScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c) /// VFMSUBADDPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplySubtractAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c) /// VFMSUBADDPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplySubtractAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c) /// VFMSUBADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplySubtractAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c) /// VFMSUBADDPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplySubtractAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c) /// VFNMADDPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplyAddNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c) /// VFNMADDPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplyAddNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c) /// VFNMADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplyAddNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d 
_mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c) /// VFNMADDPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplyAddNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c) /// VFNMADDSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplyAddNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c) /// VFNMADDSD xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplyAddNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c) /// VFNMSUBPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplySubtractNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c) /// VFNMSUBPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplySubtractNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c) /// VFNMSUBPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplySubtractNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c) /// VFNMSUBPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplySubtractNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c) /// VFNMSUBSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplySubtractNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c) /// VFNMSUBSD xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplySubtractNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.CompilerServices; using System.Runtime.Intrinsics; namespace System.Runtime.Intrinsics.X86 { /// <summary> /// This class provides access to Intel FMA hardware instructions via intrinsics /// </summary> [CLSCompliant(false)] public abstract class Fma : Avx { internal Fma() { } public static new bool IsSupported { [Intrinsic] get { return false; } } public new abstract class X64 : Avx.X64 { internal X64() { } public static new bool IsSupported { [Intrinsic] get { return false; } } } /// <summary> /// __m128 _mm_fmadd_ps (__m128 a, __m128 b, __m128 c) /// VFMADDPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplyAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmadd_pd (__m128d a, __m128d b, __m128d c) /// VFMADDPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplyAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmadd_ps (__m256 a, __m256 b, __m256 c) /// VFMADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplyAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmadd_pd (__m256d a, __m256d b, __m256d c) /// VFMADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplyAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmadd_ss (__m128 a, __m128 b, __m128 c) /// VFMADDSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplyAddScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmadd_sd (__m128d a, __m128d b, __m128d c) /// VFMADDSS xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplyAddScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmaddsub_ps (__m128 a, __m128 b, __m128 c) /// VFMADDSUBPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplyAddSubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmaddsub_pd (__m128d a, __m128d b, __m128d c) /// VFMADDSUBPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplyAddSubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmaddsub_ps (__m256 a, __m256 b, __m256 c) /// VFMADDSUBPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplyAddSubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmaddsub_pd (__m256d a, __m256d b, __m256d c) /// VFMADDSUBPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplyAddSubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmsub_ps (__m128 a, __m128 b, __m128 c) /// VFMSUBPS xmm, xmm, xmm/m128 /// </summary> 
public static Vector128<float> MultiplySubtract(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmsub_pd (__m128d a, __m128d b, __m128d c) /// VFMSUBPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplySubtract(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmsub_ps (__m256 a, __m256 b, __m256 c) /// VFMSUBPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplySubtract(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmsub_pd (__m256d a, __m256d b, __m256d c) /// VFMSUBPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplySubtract(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmsub_ss (__m128 a, __m128 b, __m128 c) /// VFMSUBSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplySubtractScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmsub_sd (__m128d a, __m128d b, __m128d c) /// VFMSUBSD xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplySubtractScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fmsubadd_ps (__m128 a, __m128 b, __m128 c) /// VFMSUBADDPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplySubtractAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fmsubadd_pd (__m128d a, __m128d b, __m128d c) /// VFMSUBADDPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplySubtractAdd(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fmsubadd_ps (__m256 a, __m256 b, __m256 c) /// VFMSUBADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplySubtractAdd(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fmsubadd_pd (__m256d a, __m256d b, __m256d c) /// VFMSUBADDPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplySubtractAdd(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmadd_ps (__m128 a, __m128 b, __m128 c) /// VFNMADDPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplyAddNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmadd_pd (__m128d a, __m128d b, __m128d c) /// VFNMADDPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplyAddNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fnmadd_ps (__m256 a, __m256 b, __m256 c) /// VFNMADDPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplyAddNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d 
_mm256_fnmadd_pd (__m256d a, __m256d b, __m256d c) /// VFNMADDPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplyAddNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmadd_ss (__m128 a, __m128 b, __m128 c) /// VFNMADDSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplyAddNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmadd_sd (__m128d a, __m128d b, __m128d c) /// VFNMADDSD xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplyAddNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmsub_ps (__m128 a, __m128 b, __m128 c) /// VFNMSUBPS xmm, xmm, xmm/m128 /// </summary> public static Vector128<float> MultiplySubtractNegated(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmsub_pd (__m128d a, __m128d b, __m128d c) /// VFNMSUBPD xmm, xmm, xmm/m128 /// </summary> public static Vector128<double> MultiplySubtractNegated(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256 _mm256_fnmsub_ps (__m256 a, __m256 b, __m256 c) /// VFNMSUBPS ymm, ymm, ymm/m256 /// </summary> public static Vector256<float> MultiplySubtractNegated(Vector256<float> a, Vector256<float> b, Vector256<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m256d _mm256_fnmsub_pd (__m256d a, __m256d b, __m256d c) /// VFNMSUBPD ymm, ymm, ymm/m256 /// </summary> public static Vector256<double> MultiplySubtractNegated(Vector256<double> a, Vector256<double> b, Vector256<double> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128 _mm_fnmsub_ss (__m128 a, __m128 b, __m128 c) /// VFNMSUBSS xmm, xmm, xmm/m32 /// </summary> public static Vector128<float> MultiplySubtractNegatedScalar(Vector128<float> a, Vector128<float> b, Vector128<float> c) { throw new PlatformNotSupportedException(); } /// <summary> /// __m128d _mm_fnmsub_sd (__m128d a, __m128d b, __m128d c) /// VFNMSUBSD xmm, xmm, xmm/m64 /// </summary> public static Vector128<double> MultiplySubtractNegatedScalar(Vector128<double> a, Vector128<double> b, Vector128<double> c) { throw new PlatformNotSupportedException(); } } }
-1
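
Every method in the PlatformNotSupported shim above throws, and IsSupported is hard-coded to false, so callers are expected to branch on IsSupported and let the JIT's dead-code elimination pick a path. A sketch of that guard pattern; the SSE fallback is illustrative and, unlike a fused FMA, rounds twice (once after the multiply and again after the add):

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class FmaUsage
{
    public static Vector128<float> MultiplyAdd(Vector128<float> a, Vector128<float> b, Vector128<float> c)
    {
        if (Fma.IsSupported)
        {
            // Lowers to a single fused vfmadd* instruction on capable hardware.
            return Fma.MultiplyAdd(a, b, c);
        }

        // Illustrative fallback: not fused, and itself requires SSE support.
        return Sse.Add(Sse.Multiply(a, b), c);
    }
}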
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/tests/Loader/classloader/generics/Instantiation/Positive/MultipleInterface09.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="MultipleInterface09.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="MultipleInterface09.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/tests/JIT/Directed/PREFIX/unaligned/1/cpobj.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="cpobj.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <RestorePackages>true</RestorePackages> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="cpobj.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/DSACng.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Runtime.Versioning;
using Internal.Cryptography;

namespace System.Security.Cryptography
{
    public sealed partial class DSACng : DSA
    {
        private CngAlgorithmCore _core = new CngAlgorithmCore(nameof(DSACng));

        /// <summary>
        ///     Creates a new DSACng object that will use the specified key. The key's
        ///     <see cref="CngKey.AlgorithmGroup" /> must be Dsa. This constructor
        ///     creates a copy of the key. Hence, the caller can safely dispose of the
        ///     passed in key and continue using the DSACng object.
        /// </summary>
        /// <param name="key">Key to use for DSA operations</param>
        /// <exception cref="ArgumentException">if <paramref name="key" /> is not a DSA key</exception>
        /// <exception cref="ArgumentNullException">if <paramref name="key" /> is null.</exception>
        [SupportedOSPlatform("windows")]
        public DSACng(CngKey key!!)
        {
            if (key.AlgorithmGroup != CngAlgorithmGroup.Dsa)
                throw new ArgumentException(SR.Cryptography_ArgDSARequiresDSAKey, nameof(key));

            Key = CngAlgorithmCore.Duplicate(key);
        }

        protected override void Dispose(bool disposing)
        {
            _core.Dispose();
        }

        private void ThrowIfDisposed()
        {
            _core.ThrowIfDisposed();
        }
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Runtime.Versioning;
using Internal.Cryptography;

namespace System.Security.Cryptography
{
    public sealed partial class DSACng : DSA
    {
        private CngAlgorithmCore _core = new CngAlgorithmCore(nameof(DSACng));

        /// <summary>
        ///     Creates a new DSACng object that will use the specified key. The key's
        ///     <see cref="CngKey.AlgorithmGroup" /> must be Dsa. This constructor
        ///     creates a copy of the key. Hence, the caller can safely dispose of the
        ///     passed in key and continue using the DSACng object.
        /// </summary>
        /// <param name="key">Key to use for DSA operations</param>
        /// <exception cref="ArgumentException">if <paramref name="key" /> is not a DSA key</exception>
        /// <exception cref="ArgumentNullException">if <paramref name="key" /> is null.</exception>
        [SupportedOSPlatform("windows")]
        public DSACng(CngKey key!!)
        {
            if (key.AlgorithmGroup != CngAlgorithmGroup.Dsa)
                throw new ArgumentException(SR.Cryptography_ArgDSARequiresDSAKey, nameof(key));

            Key = CngAlgorithmCore.Duplicate(key);
        }

        protected override void Dispose(bool disposing)
        {
            _core.Dispose();
        }

        private void ThrowIfDisposed()
        {
            _core.ThrowIfDisposed();
        }
    }
}
-1
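
The DSACng constructor's doc comment above guarantees that the key is duplicated, so the caller may dispose its CngKey immediately after construction. A minimal sketch relying on that guarantee (the helper name is hypothetical; DSACng itself is Windows-only per [SupportedOSPlatform]):

using System.Security.Cryptography;

static class DsaCngUsage
{
    // Hypothetical helper: 'callerKey' must have AlgorithmGroup == CngAlgorithmGroup.Dsa,
    // or the constructor throws ArgumentException, as the doc comment states.
    public static DSACng CreateFromCallerKey(CngKey callerKey)
    {
        DSACng dsa = new DSACng(callerKey); // duplicates the key internally
        callerKey.Dispose();                // safe: dsa keeps using its own copy
        return dsa;
    }
}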
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack that will be passed to the callee.
./src/libraries/System.Data.Common/src/System/Data/DataSet.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.ComponentModel; using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.IO; using System.Runtime.CompilerServices; using System.Runtime.Serialization; using System.Runtime.Serialization.Formatters.Binary; using System.Text; using System.Threading; using System.Xml; using System.Xml.Schema; using System.Xml.Serialization; namespace System.Data { /// <summary> /// Represents an in-memory cache of data. /// </summary> [Designer("Microsoft.VSDesigner.Data.VS.DataSetDesigner, Microsoft.VSDesigner, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a")] [DefaultProperty(nameof(DataSetName))] [Serializable] [ToolboxItem("Microsoft.VSDesigner.Data.VS.DataSetToolboxItem, Microsoft.VSDesigner, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a")] [XmlSchemaProvider(nameof(GetDataSetSchema))] [XmlRoot(nameof(DataSet))] [System.Runtime.CompilerServices.TypeForwardedFrom("System.Data, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")] [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor | DynamicallyAccessedMemberTypes.NonPublicConstructors)] // needed by Clone() to preserve derived ctors public class DataSet : MarshalByValueComponent, IListSource, IXmlSerializable, ISupportInitializeNotification, ISerializable { internal const string RequiresUnreferencedCodeMessage = "Members from serialized types may be trimmed if not referenced directly."; private const string KEY_XMLSCHEMA = "XmlSchema"; private const string KEY_XMLDIFFGRAM = "XmlDiffGram"; private DataViewManager? _defaultViewManager; // Public Collections private readonly DataTableCollection _tableCollection; private readonly DataRelationCollection _relationCollection; internal PropertyCollection? _extendedProperties; private string _dataSetName = "NewDataSet"; private string _datasetPrefix = string.Empty; internal string _namespaceURI = string.Empty; private bool _enforceConstraints = true; // globalization stuff private bool _caseSensitive; private CultureInfo _culture; private bool _cultureUserSet; // Internal definitions internal bool _fInReadXml; internal bool _fInLoadDiffgram; internal bool _fTopLevelTable; internal bool _fInitInProgress; internal bool _fEnableCascading = true; internal bool _fIsSchemaLoading; private bool _fBoundToDocument; // for XmlDataDocument internal string _mainTableName = string.Empty; //default remoting format is XML private SerializationFormat _remotingFormat = SerializationFormat.Xml; private readonly object _defaultViewManagerLock = new object(); private static int s_objectTypeCount; // Bid counter private readonly int _objectID = Interlocked.Increment(ref s_objectTypeCount); private static XmlSchemaComplexType? s_schemaTypeForWSDL; internal bool _useDataSetSchemaOnly; // UseDataSetSchemaOnly , for YUKON internal bool _udtIsWrapped; // if UDT is wrapped , for YUKON /// <summary> /// Initializes a new instance of the <see cref='System.Data.DataSet'/> class. 
/// </summary> public DataSet() { GC.SuppressFinalize(this); DataCommonEventSource.Log.Trace("<ds.DataSet.DataSet|API> {0}", ObjectID); // others will call this constructor _tableCollection = new DataTableCollection(this); _relationCollection = new DataRelationCollection.DataSetRelationCollection(this); _culture = CultureInfo.CurrentCulture; // Set default locale } /// <summary> /// Initializes a new instance of a <see cref='System.Data.DataSet'/> /// class with the given name. /// </summary> public DataSet(string dataSetName) : this() { DataSetName = dataSetName; } [DefaultValue(SerializationFormat.Xml)] public SerializationFormat RemotingFormat { get { return _remotingFormat; } set { switch (value) { case SerializationFormat.Xml: break; case SerializationFormat.Binary: if (LocalAppContextSwitches.AllowUnsafeSerializationFormatBinary) { break; } throw ExceptionBuilder.SerializationFormatBinaryNotSupported(); default: throw ExceptionBuilder.InvalidRemotingFormat(value); } _remotingFormat = value; // this property is inherited by DataTable from DataSet, so we propagate the value to each DataTable as well for (int i = 0; i < Tables.Count; i++) { Tables[i].RemotingFormat = value; } } } [Browsable(false)] [DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)] public virtual SchemaSerializationMode SchemaSerializationMode { //Typed DataSet calls into this get { return SchemaSerializationMode.IncludeSchema; } set { if (value != SchemaSerializationMode.IncludeSchema) { throw ExceptionBuilder.CannotChangeSchemaSerializationMode(); } } } // Check whether the stream is binary serialized. // 'static' function that consumes SerializationInfo protected bool IsBinarySerialized(SerializationInfo info, StreamingContext context) { // mainly for typed DS // our default remoting format is XML SerializationFormat remotingFormat = SerializationFormat.Xml; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { if (e.Name == "DataSet.RemotingFormat") { //DataSet.RemotingFormat does not exist in V1/V1.1 versions remotingFormat = (SerializationFormat)e.Value!; break; } } return (remotingFormat == SerializationFormat.Binary); } // Should Schema be included during Serialization // 'static' function that consumes SerializationInfo protected SchemaSerializationMode DetermineSchemaSerializationMode(SerializationInfo info, StreamingContext context) { //Typed DataSet calls into this SchemaSerializationMode schemaSerializationMode = SchemaSerializationMode.IncludeSchema; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { if (e.Name == "SchemaSerializationMode.DataSet") { //SchemaSerializationMode.DataSet does not exist in V1/V1.1 versions schemaSerializationMode = (SchemaSerializationMode)e.Value!; break; } } return schemaSerializationMode; } protected SchemaSerializationMode DetermineSchemaSerializationMode(XmlReader reader) { //Typed DataSet calls into this SchemaSerializationMode schemaSerializationMode = SchemaSerializationMode.IncludeSchema; reader.MoveToContent(); if (reader.NodeType == XmlNodeType.Element) { if (reader.HasAttributes) { string?
attribValue = reader.GetAttribute(Keywords.MSD_SCHEMASERIALIZATIONMODE, Keywords.MSDNS); if (string.Equals(attribValue, Keywords.MSD_EXCLUDESCHEMA, StringComparison.OrdinalIgnoreCase)) { schemaSerializationMode = SchemaSerializationMode.ExcludeSchema; } else if (string.Equals(attribValue, Keywords.MSD_INCLUDESCHEMA, StringComparison.OrdinalIgnoreCase)) { schemaSerializationMode = SchemaSerializationMode.IncludeSchema; } else if (attribValue != null) { // if attrib does not exist, then don't throw throw ExceptionBuilder.InvalidSchemaSerializationMode(typeof(SchemaSerializationMode), attribValue); } } } return schemaSerializationMode; } // Deserialize all the tables data of the dataset from binary/xml stream. // 'instance' method that consumes SerializationInfo [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] protected void GetSerializationData(SerializationInfo info, StreamingContext context) { // mainly for typed DS SerializationFormat remotingFormat = SerializationFormat.Xml; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { if (e.Name == "DataSet.RemotingFormat") { //DataSet.RemotingFormat does not exist in V1/V1.1 versions remotingFormat = (SerializationFormat)e.Value!; break; } } DeserializeDataSetData(info, context, remotingFormat); } // Deserialize all the tables schema and data of the dataset from binary/xml stream. [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", Justification = "CreateInstanceOfThisType's use of GetType uses only the parameterless constructor, but the annotations preserve all non-public constructors causing a warning for the serialization constructors. Those constructors won't be used here.")] protected DataSet(SerializationInfo info, StreamingContext context) : this(info, context, true) { } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", Justification = "CreateInstanceOfThisType's use of GetType uses only the parameterless constructor, but the annotations preserve all non-public constructors causing a warning for the serialization constructors. 
Those constructors won't be used here.")] protected DataSet(SerializationInfo info, StreamingContext context, bool ConstructSchema) : this() { SerializationFormat remotingFormat = SerializationFormat.Xml; SchemaSerializationMode schemaSerializationMode = SchemaSerializationMode.IncludeSchema; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { switch (e.Name) { case "DataSet.RemotingFormat": //DataSet.RemotingFormat does not exist in V1/V1.1 versions remotingFormat = (SerializationFormat)e.Value!; break; case "SchemaSerializationMode.DataSet": //SchemaSerializationMode.DataSet does not exist in V1/V1.1 versions schemaSerializationMode = (SchemaSerializationMode)e.Value!; break; } } if (remotingFormat == SerializationFormat.Binary && !LocalAppContextSwitches.AllowUnsafeSerializationFormatBinary) { throw ExceptionBuilder.SerializationFormatBinaryNotSupported(); } if (schemaSerializationMode == SchemaSerializationMode.ExcludeSchema) { InitializeDerivedDataSet(); } // adding back this check will fix typed dataset XML remoting, but we have to fix case that // a class inherits from DataSet and just relies on DataSet to deserialize (see SQL BU DT 374717) // to fix that case also, we need to add a flag and add it to below check so return (no-op) will be // conditional (flag needs to be set in TypedDataSet if (remotingFormat == SerializationFormat.Xml && !ConstructSchema) { return; //For typed dataset xml remoting, this is a no-op } DeserializeDataSet(info, context, remotingFormat, schemaSerializationMode); } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Justification = "Binary serialization is unsafe in general and is planned to be obsoleted. We do not want to mark interface or ctors of this class as unsafe as that would show many unnecessary warnings elsewhere.")] public virtual void GetObjectData(SerializationInfo info, StreamingContext context) { SerializationFormat remotingFormat = RemotingFormat; SerializeDataSet(info, context, remotingFormat); } // Deserialize all the tables data of the dataset from binary/xml stream. protected virtual void InitializeDerivedDataSet() { } // Serialize all the tables. 
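// A minimal, hypothetical usage sketch of the binary remoting path implemented by
// SerializeDataSet below. The AppContext switch name is an assumption inferred from
// the LocalAppContextSwitches gate used by the RemotingFormat setter, not verified
// here; the method name is illustrative only.
private static byte[] BinaryRoundTripSketch()
{
    AppContext.SetSwitch("Switch.System.Data.AllowUnsafeSerializationFormatBinary", true); // assumed switch name
    DataSet ds = new DataSet("Demo");
    ds.Tables.Add(new DataTable("T"));
    ds.RemotingFormat = SerializationFormat.Binary; // the setter propagates this to each table
    using MemoryStream ms = new MemoryStream();
#pragma warning disable SYSLIB0011 // BinaryFormatter is obsolete and unsafe
    new BinaryFormatter().Serialize(ms, ds); // drives GetObjectData -> SerializeDataSet
#pragma warning restore SYSLIB0011
    return ms.ToArray();
}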
[RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void SerializeDataSet(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat) { Debug.Assert(info != null); info.AddValue("DataSet.RemotingVersion", new Version(2, 0)); // SqlHotFix 299, SerializationFormat enumeration types don't exist in V1.1 SP1 if (SerializationFormat.Xml != remotingFormat) { info.AddValue("DataSet.RemotingFormat", remotingFormat); } // SqlHotFix 299, SchemaSerializationMode enumeration types don't exist in V1.1 SP1 if (SchemaSerializationMode.IncludeSchema != SchemaSerializationMode) { //SkipSchemaDuringSerialization info.AddValue("SchemaSerializationMode.DataSet", SchemaSerializationMode); } if (remotingFormat != SerializationFormat.Xml) { if (SchemaSerializationMode == SchemaSerializationMode.IncludeSchema) { //DataSet public state properties SerializeDataSetProperties(info, context); //Tables Count info.AddValue("DataSet.Tables.Count", Tables.Count); //Tables, Columns, Rows for (int i = 0; i < Tables.Count; i++) { BinaryFormatter bf = new BinaryFormatter(null, new StreamingContext(context.State, false)); MemoryStream memStream = new MemoryStream(); #pragma warning disable SYSLIB0011 // Issue https://github.com/dotnet/runtime/issues/39289 tracks finding an alternative to BinaryFormatter bf.Serialize(memStream, Tables[i]); #pragma warning restore SYSLIB0011 memStream.Position = 0; info.AddValue(string.Format(CultureInfo.InvariantCulture, "DataSet.Tables_{0}", i), memStream.GetBuffer()); } //Constraints for (int i = 0; i < Tables.Count; i++) { Tables[i].SerializeConstraints(info, context, i, true); } //Relations SerializeRelations(info, context); //Expression Columns for (int i = 0; i < Tables.Count; i++) { Tables[i].SerializeExpressionColumns(info, context, i); } } else { //Serialize DataSet public properties. SerializeDataSetProperties(info, context); } //Rows for (int i = 0; i < Tables.Count; i++) { Tables[i].SerializeTableData(info, context, i); } } else { // old behaviour string strSchema = GetXmlSchemaForRemoting(null); info.AddValue(KEY_XMLSCHEMA, strSchema); StringBuilder strBuilder = new StringBuilder(EstimatedXmlStringSize() * 2); StringWriter strWriter = new StringWriter(strBuilder, CultureInfo.InvariantCulture); XmlTextWriter w = new XmlTextWriter(strWriter); WriteXml(w, XmlWriteMode.DiffGram); info.AddValue(KEY_XMLDIFFGRAM, strWriter.ToString()); } } // Deserialize all the tables - marked internal so that DataTable can call into this [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void DeserializeDataSet(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat, SchemaSerializationMode schemaSerializationMode) { // deserialize schema DeserializeDataSetSchema(info, context, remotingFormat, schemaSerializationMode); // deserialize data DeserializeDataSetData(info, context, remotingFormat); } // Deserialize schema. 
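// Hypothetical counterpart to the sketch above: feeding the payload back through
// BinaryFormatter re-enters the serialization constructor, which in turn calls
// DeserializeDataSetSchema and DeserializeDataSetData below. Illustrative only.
private static DataSet BinaryDeserializeSketch(byte[] payload)
{
    using MemoryStream ms = new MemoryStream(payload);
#pragma warning disable SYSLIB0011 // BinaryFormatter is obsolete and unsafe
    return (DataSet)new BinaryFormatter().Deserialize(ms);
#pragma warning restore SYSLIB0011
}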
[RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void DeserializeDataSetSchema(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat, SchemaSerializationMode schemaSerializationMode) { if (remotingFormat != SerializationFormat.Xml) { if (schemaSerializationMode == SchemaSerializationMode.IncludeSchema) { //DataSet public state properties DeserializeDataSetProperties(info, context); //Tables Count int tableCount = info.GetInt32("DataSet.Tables.Count"); //Tables, Columns, Rows for (int i = 0; i < tableCount; i++) { byte[] buffer = (byte[])info.GetValue(string.Format(CultureInfo.InvariantCulture, "DataSet.Tables_{0}", i), typeof(byte[]))!; MemoryStream memStream = new MemoryStream(buffer); memStream.Position = 0; BinaryFormatter bf = new BinaryFormatter(null, new StreamingContext(context.State, false)); #pragma warning disable SYSLIB0011 // Issue https://github.com/dotnet/runtime/issues/39289 tracks finding an alternative to BinaryFormatter DataTable dt = (DataTable)bf.Deserialize(memStream); #pragma warning restore SYSLIB0011 Tables.Add(dt); } //Constraints for (int i = 0; i < tableCount; i++) { Tables[i].DeserializeConstraints(info, context, /* table index */i, /* serialize all constraints */ true); // } //Relations DeserializeRelations(info, context); //Expression Columns for (int i = 0; i < tableCount; i++) { Tables[i].DeserializeExpressionColumns(info, context, i); } } else { //DeSerialize DataSet public properties.[Locale, CaseSensitive and EnforceConstraints] DeserializeDataSetProperties(info, context); } } else { string? strSchema = (string?)info.GetValue(KEY_XMLSCHEMA, typeof(string)); if (strSchema != null) { ReadXmlSchema(new XmlTextReader(new StringReader(strSchema)), true); } } } // Deserialize all data. [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void DeserializeDataSetData(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat) { if (remotingFormat != SerializationFormat.Xml) { for (int i = 0; i < Tables.Count; i++) { Tables[i].DeserializeTableData(info, context, i); } } else { string? 
strData = (string?)info.GetValue(KEY_XMLDIFFGRAM, typeof(string)); if (strData != null) { ReadXml(new XmlTextReader(new StringReader(strData)), XmlReadMode.DiffGram); } } } // Serialize just the dataset properties private void SerializeDataSetProperties(SerializationInfo info, StreamingContext context) { //DataSet basic properties info.AddValue("DataSet.DataSetName", DataSetName); info.AddValue("DataSet.Namespace", Namespace); info.AddValue("DataSet.Prefix", Prefix); //DataSet runtime properties info.AddValue("DataSet.CaseSensitive", CaseSensitive); info.AddValue("DataSet.LocaleLCID", Locale.LCID); info.AddValue("DataSet.EnforceConstraints", EnforceConstraints); //ExtendedProperties info.AddValue("DataSet.ExtendedProperties", ExtendedProperties); } // DeSerialize dataset properties private void DeserializeDataSetProperties(SerializationInfo info, StreamingContext context) { //DataSet basic properties _dataSetName = info.GetString("DataSet.DataSetName")!; _namespaceURI = info.GetString("DataSet.Namespace")!; _datasetPrefix = info.GetString("DataSet.Prefix")!; //DataSet runtime properties _caseSensitive = info.GetBoolean("DataSet.CaseSensitive"); int lcid = (int)info.GetValue("DataSet.LocaleLCID", typeof(int))!; _culture = new CultureInfo(lcid); _cultureUserSet = true; _enforceConstraints = info.GetBoolean("DataSet.EnforceConstraints"); //ExtendedProperties _extendedProperties = (PropertyCollection?)info.GetValue("DataSet.ExtendedProperties", typeof(PropertyCollection)); } // Gets relation info from the dataset. // ***Schema for Serializing ArrayList of Relations*** // Relations -> [relationName]->[parentTableIndex, parentcolumnIndexes]->[childTableIndex, childColumnIndexes]->[Nested]->[extendedProperties] private void SerializeRelations(SerializationInfo info, StreamingContext context) { ArrayList relationList = new ArrayList(); foreach (DataRelation rel in Relations) { int[] parentInfo = new int[rel.ParentColumns.Length + 1]; parentInfo[0] = Tables.IndexOf(rel.ParentTable); for (int j = 1; j < parentInfo.Length; j++) { parentInfo[j] = rel.ParentColumns[j - 1].Ordinal; } int[] childInfo = new int[rel.ChildColumns.Length + 1]; childInfo[0] = Tables.IndexOf(rel.ChildTable); for (int j = 1; j < childInfo.Length; j++) { childInfo[j] = rel.ChildColumns[j - 1].Ordinal; } ArrayList list = new ArrayList(); list.Add(rel.RelationName); list.Add(parentInfo); list.Add(childInfo); list.Add(rel.Nested); list.Add(rel._extendedProperties); relationList.Add(list); } info.AddValue("DataSet.Relations", relationList); } // Adds relations to the dataset. // ***Schema for Serializing ArrayList of Relations*** // Relations -> [relationName]->[parentTableIndex, parentcolumnIndexes]->[childTableIndex, childColumnIndexes]->[Nested]->[extendedProperties] private void DeserializeRelations(SerializationInfo info, StreamingContext context) { ArrayList relationList = (ArrayList)info.GetValue("DataSet.Relations", typeof(ArrayList))!; foreach (ArrayList list in relationList) { string relationName = (string)list[0]!; int[] parentInfo = (int[])list[1]!; int[] childInfo = (int[])list[2]!; bool isNested = (bool)list[3]!; PropertyCollection? extendedProperties = (PropertyCollection?)list[4]!; //ParentKey Columns. DataColumn[] parentkeyColumns = new DataColumn[parentInfo.Length - 1]; for (int i = 0; i < parentkeyColumns.Length; i++) { parentkeyColumns[i] = Tables[parentInfo[0]].Columns[parentInfo[i + 1]]; } //ChildKey Columns. 
DataColumn[] childkeyColumns = new DataColumn[childInfo.Length - 1]; for (int i = 0; i < childkeyColumns.Length; i++) { childkeyColumns[i] = Tables[childInfo[0]].Columns[childInfo[i + 1]]; } //Create the Relation, without any constraints[Assumption: The constraints are added earlier than the relations] DataRelation rel = new DataRelation(relationName, parentkeyColumns, childkeyColumns, false); rel.CheckMultipleNested = false; // disable the check for multiple nested parent rel.Nested = isNested; rel._extendedProperties = extendedProperties; Relations.Add(rel); rel.CheckMultipleNested = true; // enable the check for multiple nested parent } } internal void FailedEnableConstraints() { EnforceConstraints = false; throw ExceptionBuilder.EnforceConstraint(); } /// <summary> /// Gets or sets a value indicating whether string /// comparisons within <see cref='System.Data.DataTable'/> /// objects are case-sensitive. /// </summary> [DefaultValue(false)] public bool CaseSensitive { get { return _caseSensitive; } set { if (_caseSensitive != value) { bool oldValue = _caseSensitive; _caseSensitive = value; if (!ValidateCaseConstraint()) { _caseSensitive = oldValue; throw ExceptionBuilder.CannotChangeCaseLocale(); } foreach (DataTable table in Tables) { table.SetCaseSensitiveValue(value, false, true); } } } } bool IListSource.ContainsListCollection => true; /// <summary> /// Gets a custom view of the data contained by the <see cref='System.Data.DataSet'/> , one /// that allows filtering, searching, and navigating through the custom data view. /// </summary> [Browsable(false)] public DataViewManager DefaultViewManager { get { if (_defaultViewManager == null) { lock (_defaultViewManagerLock) { if (_defaultViewManager == null) { _defaultViewManager = new DataViewManager(this, true); } } } return _defaultViewManager; } } /// <summary> /// Gets or sets a value indicating whether constraint rules are followed when /// attempting any update operation. /// </summary> [DefaultValue(true)] public bool EnforceConstraints { get { return _enforceConstraints; } set { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.set_EnforceConstraints|API> {0}, {1}", ObjectID, value); try { if (_enforceConstraints != value) { if (value) { EnableConstraints(); } _enforceConstraints = value; } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } } internal void RestoreEnforceConstraints(bool value) { _enforceConstraints = value; } internal void EnableConstraints() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.EnableConstraints|INFO> {0}", ObjectID); try { bool errors = false; for (ConstraintEnumerator constraints = new ConstraintEnumerator(this); constraints.GetNext();) { Constraint constraint = constraints.GetConstraint(); errors |= constraint.IsConstraintViolated(); } foreach (DataTable table in Tables) { foreach (DataColumn column in table.Columns) { if (!column.AllowDBNull) { errors |= column.IsNotAllowDBNullViolated(); } if (column.MaxLength >= 0) { errors |= column.IsMaxLengthViolated(); } } } if (errors) { FailedEnableConstraints(); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Gets or sets the name of this <see cref='System.Data.DataSet'/> . 
/// </summary> [DefaultValue("")] public string DataSetName { get { return _dataSetName; } set { DataCommonEventSource.Log.Trace("<ds.DataSet.set_DataSetName|API> {0}, '{1}'", ObjectID, value); if (value != _dataSetName) { if (value == null || value.Length == 0) { throw ExceptionBuilder.SetDataSetNameToEmpty(); } DataTable? conflicting = Tables[value, Namespace]; if ((conflicting != null) && (!conflicting._fNestedInDataset)) { throw ExceptionBuilder.SetDataSetNameConflicting(value); } RaisePropertyChanging(nameof(DataSetName)); _dataSetName = value; } } } [DefaultValue("")] [AllowNull] public string Namespace { get { return _namespaceURI; } set { DataCommonEventSource.Log.Trace("<ds.DataSet.set_Namespace|API> {0}, '{1}'", ObjectID, value); if (value == null) { value = string.Empty; } if (value != _namespaceURI) { RaisePropertyChanging(nameof(Namespace)); foreach (DataTable dt in Tables) { if (dt._tableNamespace != null) { continue; } if ((dt.NestedParentRelations.Length == 0) || (dt.NestedParentRelations.Length == 1 && dt.NestedParentRelations[0].ChildTable == dt)) { if (Tables.Contains(dt.TableName, value, false, true)) { throw ExceptionBuilder.DuplicateTableName2(dt.TableName, value); } dt.CheckCascadingNamespaceConflict(value); dt.DoRaiseNamespaceChange(); } } _namespaceURI = value; if (string.IsNullOrEmpty(value)) { _datasetPrefix = string.Empty; } } } } [DefaultValue("")] [AllowNull] public string Prefix { get { return _datasetPrefix; } set { if (value == null) { value = string.Empty; } if ((XmlConvert.DecodeName(value) == value) && (XmlConvert.EncodeName(value) != value)) { throw ExceptionBuilder.InvalidPrefix(value); } if (value != _datasetPrefix) { RaisePropertyChanging(nameof(Prefix)); _datasetPrefix = value; } } } /// <summary> /// Gets the collection of custom user information. /// </summary> [Browsable(false)] public PropertyCollection ExtendedProperties => _extendedProperties ?? (_extendedProperties = new PropertyCollection()); /// <summary> /// Gets a value indicating whether there are errors in any /// of the rows in any of the tables of this <see cref='System.Data.DataSet'/> . /// </summary> [Browsable(false)] public bool HasErrors { get { for (int i = 0; i < Tables.Count; i++) { if (Tables[i].HasErrors) { return true; } } return false; } } [Browsable(false)] public bool IsInitialized => !_fInitInProgress; /// <summary> /// Gets or sets the locale information used to compare strings within the table. 
/// </summary> public CultureInfo Locale { get { // used for comparing, not formatting/parsing Debug.Assert(null != _culture, "DataSet.Locale: null culture"); return _culture; } set { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.set_Locale|API> {0}", ObjectID); try { if (value != null) { if (!_culture.Equals(value)) { SetLocaleValue(value, true); } _cultureUserSet = true; } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } } internal void SetLocaleValue(CultureInfo value, bool userSet) { bool flag = false; bool exceptionThrown = false; int tableCount = 0; CultureInfo oldLocale = _culture; bool oldUserSet = _cultureUserSet; try { _culture = value; _cultureUserSet = userSet; foreach (DataTable table in Tables) { if (!table.ShouldSerializeLocale()) { table.SetLocaleValue(value, false, false); } } flag = ValidateLocaleConstraint(); if (flag) { flag = false; foreach (DataTable table in Tables) { tableCount++; if (!table.ShouldSerializeLocale()) { table.SetLocaleValue(value, false, true); } } flag = true; } } catch { exceptionThrown = true; throw; } finally { if (!flag) { // reset the old locale if validation failed or an exception was thrown _culture = oldLocale; _cultureUserSet = oldUserSet; foreach (DataTable table in Tables) { if (!table.ShouldSerializeLocale()) { table.SetLocaleValue(oldLocale, false, false); } } try { for (int i = 0; i < tableCount; ++i) { if (!Tables[i].ShouldSerializeLocale()) { Tables[i].SetLocaleValue(oldLocale, false, true); } } } catch (Exception e) when (ADP.IsCatchableExceptionType(e)) { ADP.TraceExceptionWithoutRethrow(e); } if (!exceptionThrown) { throw ExceptionBuilder.CannotChangeCaseLocale(null); } } } } internal bool ShouldSerializeLocale() { // this method is used in design-time scenarios via reflection // by the property grid to show the Locale property in bold or not // by the code dom for persisting the Locale property or not // we always want the locale persisted if set by the user or different from the current thread // but that logic should be performed by the serialization code return _cultureUserSet; } [Browsable(false)] [DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)] public override ISite? Site { get { return base.Site; } set { ISite? oldSite = Site; if (value == null && oldSite != null) { IContainer? cont = oldSite.Container; if (cont != null) { for (int i = 0; i < Tables.Count; i++) { if (Tables[i].Site != null) { cont.Remove(Tables[i]); } } } } base.Site = value; } } /// <summary> /// Gets the collection of relations that link tables and /// allow navigation from parent tables to child tables. /// </summary> [DesignerSerializationVisibility(DesignerSerializationVisibility.Content)] public DataRelationCollection Relations => _relationCollection; /// <summary> /// Indicates whether the <see cref='Relations'/> property should be persisted. /// </summary> protected virtual bool ShouldSerializeRelations() => true; /// <summary> /// Resets the <see cref='System.Data.DataSet.Relations'/> property to its default state. /// </summary> private void ResetRelations() => Relations.Clear(); /// <summary> /// Gets the collection of tables contained in the <see cref='System.Data.DataSet'/>. /// </summary> [DesignerSerializationVisibility(DesignerSerializationVisibility.Content)] public DataTableCollection Tables => _tableCollection; /// <summary> /// Indicates whether the <see cref='System.Data.DataSet.Tables'/> property should be persisted.
/// </summary> protected virtual bool ShouldSerializeTables() => true; /// <summary> /// Resets the <see cref='System.Data.DataSet.Tables'/> property to its default state. /// </summary> private void ResetTables() => Tables.Clear(); internal bool FBoundToDocument { get { return _fBoundToDocument; } set { _fBoundToDocument = value; } } /// <summary> /// Commits all the changes made to this <see cref='System.Data.DataSet'/> since it was loaded or the last /// time <see cref='System.Data.DataSet.AcceptChanges'/> was called. /// </summary> public void AcceptChanges() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.AcceptChanges|API> {0}", ObjectID); try { for (int i = 0; i < Tables.Count; i++) { Tables[i].AcceptChanges(); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal event PropertyChangedEventHandler? PropertyChanging; /// <summary> /// Occurs when attempting to merge schemas for two tables with the same name. /// </summary> public event MergeFailedEventHandler? MergeFailed; internal event DataRowCreatedEventHandler? DataRowCreated; // Internal for XmlDataDocument only internal event DataSetClearEventhandler? ClearFunctionCalled; // Internal for XmlDataDocument only public event EventHandler? Initialized; public void BeginInit() { _fInitInProgress = true; } public void EndInit() { Tables.FinishInitCollection(); for (int i = 0; i < Tables.Count; i++) { Tables[i].Columns.FinishInitCollection(); } for (int i = 0; i < Tables.Count; i++) { Tables[i].Constraints.FinishInitConstraints(); } ((DataRelationCollection.DataSetRelationCollection)Relations).FinishInitRelations(); _fInitInProgress = false; OnInitialized(); } /// <summary> /// Clears the <see cref='System.Data.DataSet'/> of any data by removing all rows in all tables. /// </summary> public void Clear() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Clear|API> {0}", ObjectID); try { OnClearFunctionCalled(null); bool fEnforce = EnforceConstraints; EnforceConstraints = false; for (int i = 0; i < Tables.Count; i++) { Tables[i].Clear(); } EnforceConstraints = fEnforce; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } private DataSet CreateInstanceOfThisType() { return (DataSet)Activator.CreateInstance(GetType(), true)!; } /// <summary> /// Clones the structure of the <see cref='System.Data.DataSet'/>, including all <see cref='System.Data.DataTable'/> schemas, relations, and /// constraints. /// </summary> // Prevent inlining so that reflection calls are not moved to caller that may be in a different assembly that may have a different grant set. [MethodImpl(MethodImplOptions.NoInlining)] public virtual DataSet Clone() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Clone|API> {0}", ObjectID); try { DataSet ds = CreateInstanceOfThisType(); if (ds.Tables.Count > 0) // To clean up all the schema in strong typed dataset. { ds.Reset(); } //copy some original dataset properties ds.DataSetName = DataSetName; ds.CaseSensitive = CaseSensitive; ds._culture = _culture; ds._cultureUserSet = _cultureUserSet; ds.EnforceConstraints = EnforceConstraints; ds.Namespace = Namespace; ds.Prefix = Prefix; ds.RemotingFormat = RemotingFormat; ds._fIsSchemaLoading = true; //delay expression evaluation // ...Tables... DataTableCollection tbls = Tables; for (int i = 0; i < tbls.Count; i++) { DataTable dt = tbls[i].Clone(ds); dt._tableNamespace = tbls[i].Namespace; // hardcode the namespace for a second to not mess up // DataRelation cloning. 
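// The temporary pin above keeps (name, namespace) table lookups stable while
// constraints and relations are cloned; the loop near the end of Clone()
// assigns each clone's _tableNamespace back to the source table's original
// value before expression evaluation is re-enabled.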
ds.Tables.Add(dt); } // ...Constraints... for (int i = 0; i < tbls.Count; i++) { ConstraintCollection constraints = tbls[i].Constraints; for (int j = 0; j < constraints.Count; j++) { if (constraints[j] is UniqueConstraint) { continue; } ForeignKeyConstraint foreign = (ForeignKeyConstraint)constraints[j]; if (foreign.Table == foreign.RelatedTable) { continue; // we have already added this foreign key in while cloning the datatable } ds.Tables[i].Constraints.Add(constraints[j].Clone(ds)!); } } // ...Relations... DataRelationCollection rels = Relations; for (int i = 0; i < rels.Count; i++) { DataRelation rel = rels[i].Clone(ds); rel.CheckMultipleNested = false; // disable the check for multiple nested parent ds.Relations.Add(rel); rel.CheckMultipleNested = true; // enable the check for multiple nested parent } // ...Extended Properties... if (_extendedProperties != null) { foreach (object key in _extendedProperties.Keys) { ds.ExtendedProperties[key] = _extendedProperties[key]; } } foreach (DataTable table in Tables) { foreach (DataColumn col in table.Columns) { if (col.Expression.Length != 0) { ds.Tables[table.TableName, table.Namespace]!.Columns[col.ColumnName]!.CopyExpressionFrom(col); } } } for (int i = 0; i < tbls.Count; i++) { ds.Tables[i]._tableNamespace = tbls[i]._tableNamespace; // undo the hardcoding of the namespace } ds._fIsSchemaLoading = false; //reactivate column computations return ds; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Copies both the structure and data for this <see cref='System.Data.DataSet'/>. /// </summary> public DataSet Copy() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Copy|API> {0}", ObjectID); try { DataSet dsNew = Clone(); bool fEnforceConstraints = dsNew.EnforceConstraints; dsNew.EnforceConstraints = false; foreach (DataTable table in Tables) { DataTable destTable = dsNew.Tables[table.TableName, table.Namespace]!; foreach (DataRow row in table.Rows) { table.CopyRow(destTable, row); } } dsNew.EnforceConstraints = fEnforceConstraints; return dsNew; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal int EstimatedXmlStringSize() { int bytes = 100; for (int i = 0; i < Tables.Count; i++) { int rowBytes = (Tables[i].TableName.Length + 4) << 2; DataTable table = Tables[i]; for (int j = 0; j < table.Columns.Count; j++) { rowBytes += ((table.Columns[j].ColumnName.Length + 4) << 2); rowBytes += 20; } bytes += table.Rows.Count * rowBytes; } return bytes; } /// <summary> /// Returns a copy of the <see cref='System.Data.DataSet'/> that contains all changes made to /// it since it was loaded or <see cref='System.Data.DataSet.AcceptChanges'/> was last called. /// </summary> public DataSet? GetChanges() => GetChanges(DataRowState.Added | DataRowState.Deleted | DataRowState.Modified); private struct TableChanges { private readonly BitArray _rowChanges; internal TableChanges(int rowCount) { _rowChanges = new BitArray(rowCount); HasChanges = 0; } internal int HasChanges { get; set; } internal bool this[int index] { get { return _rowChanges[index]; } set { Debug.Assert(value && !_rowChanges[index], "setting twice or to false"); _rowChanges[index] = value; HasChanges++; } } } public DataSet? GetChanges(DataRowState rowStates) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetChanges|API> {0}, rowStates={1}", ObjectID, rowStates); try { DataSet? 
dsNew = null; bool fEnforceConstraints = false; if (0 != (rowStates & ~(DataRowState.Added | DataRowState.Deleted | DataRowState.Modified | DataRowState.Unchanged))) { throw ExceptionBuilder.InvalidRowState(rowStates); } // Initialize all the individual table bitmaps. TableChanges[] bitMatrix = new TableChanges[Tables.Count]; for (int i = 0; i < bitMatrix.Length; ++i) { bitMatrix[i] = new TableChanges(Tables[i].Rows.Count); } // find all the modified rows and their parents MarkModifiedRows(bitMatrix, rowStates); // copy the changes to a cloned table for (int i = 0; i < bitMatrix.Length; ++i) { Debug.Assert(0 <= bitMatrix[i].HasChanges, "negative change count"); if (0 < bitMatrix[i].HasChanges) { if (null == dsNew) { dsNew = Clone(); fEnforceConstraints = dsNew.EnforceConstraints; dsNew.EnforceConstraints = false; } DataTable table = Tables[i]; DataTable destTable = dsNew.Tables[table.TableName, table.Namespace]!; Debug.Assert(bitMatrix[i].HasChanges <= table.Rows.Count, "to many changes"); for (int j = 0; 0 < bitMatrix[i].HasChanges; ++j) { // Loop through the rows. if (bitMatrix[i][j]) { table.CopyRow(destTable, table.Rows[j]); bitMatrix[i].HasChanges--; } } } } if (null != dsNew) { dsNew.EnforceConstraints = fEnforceConstraints; } return dsNew; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } private void MarkModifiedRows(TableChanges[] bitMatrix, DataRowState rowStates) { // for every table, every row & every relation find the modified rows and for non-deleted rows, their parents for (int tableIndex = 0; tableIndex < bitMatrix.Length; ++tableIndex) { DataRowCollection rows = Tables[tableIndex].Rows; int rowCount = rows.Count; for (int rowIndex = 0; rowIndex < rowCount; ++rowIndex) { DataRow row = rows[rowIndex]; DataRowState rowState = row.RowState; Debug.Assert(DataRowState.Added == rowState || DataRowState.Deleted == rowState || DataRowState.Modified == rowState || DataRowState.Unchanged == rowState, "unexpected DataRowState"); // if bit not already set and row is modified if ((0 != (rowStates & rowState)) && !bitMatrix[tableIndex][rowIndex]) { bitMatrix[tableIndex][rowIndex] = true; if (DataRowState.Deleted != rowState) { MarkRelatedRowsAsModified(bitMatrix, row); } } } } } private void MarkRelatedRowsAsModified(TableChanges[] bitMatrix, DataRow row) { DataRelationCollection relations = row.Table.ParentRelations; int relationCount = relations.Count; for (int relatedIndex = 0; relatedIndex < relationCount; ++relatedIndex) { DataRow[] relatedRows = row.GetParentRows(relations[relatedIndex], DataRowVersion.Current); foreach (DataRow relatedRow in relatedRows) { int relatedTableIndex = Tables.IndexOf(relatedRow.Table); int relatedRowIndex = relatedRow.Table.Rows.IndexOf(relatedRow); if (!bitMatrix[relatedTableIndex][relatedRowIndex]) { bitMatrix[relatedTableIndex][relatedRowIndex] = true; if (DataRowState.Deleted != relatedRow.RowState) { // recurse into related rows MarkRelatedRowsAsModified(bitMatrix, relatedRow); } } } } } IList IListSource.GetList() => DefaultViewManager; [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal string GetRemotingDiffGram(DataTable table) { StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter); writer.Formatting = Formatting.Indented; // Create and save the updates new NewDiffgramGen(table, false).Save(writer, table); return strWriter.ToString(); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public string GetXml() { long logScopeId = 
DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetXml|API> {0}", ObjectID); try { // StringBuilder strBuilder = new StringBuilder(EstimatedXmlStringSize()); // StringWriter strWriter = new StringWriter(strBuilder); StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter w = new XmlTextWriter(strWriter); w.Formatting = Formatting.Indented; new XmlDataTreeWriter(this).Save(w, false); return strWriter.ToString(); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public string GetXmlSchema() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetXmlSchema|API> {0}", ObjectID); try { StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter); writer.Formatting = Formatting.Indented; (new XmlTreeGen(SchemaFormat.Public)).Save(this, writer); return strWriter.ToString(); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal string GetXmlSchemaForRemoting(DataTable? table) { StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter); writer.Formatting = Formatting.Indented; if (table == null) { if (SchemaSerializationMode == SchemaSerializationMode.ExcludeSchema) { (new XmlTreeGen(SchemaFormat.RemotingSkipSchema)).Save(this, writer); } else { (new XmlTreeGen(SchemaFormat.Remoting)).Save(this, writer); } } else { // no skip schema support for typed datatable (new XmlTreeGen(SchemaFormat.Remoting)).Save(table, writer); } return strWriter.ToString(); } /// <summary> /// Gets a value indicating whether the <see cref='System.Data.DataSet'/> has changes, including new, /// deleted, or modified rows. /// </summary> public bool HasChanges() => HasChanges(DataRowState.Added | DataRowState.Deleted | DataRowState.Modified); /// <summary> /// Gets a value indicating whether the <see cref='System.Data.DataSet'/> has changes, including new, /// deleted, or modified rows, filtered by <see cref='System.Data.DataRowState'/>. /// </summary> public bool HasChanges(DataRowState rowStates) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.HasChanges|API> {0}, rowStates={1}", ObjectID, (int)rowStates); try { const DataRowState allRowStates = DataRowState.Detached | DataRowState.Unchanged | DataRowState.Added | DataRowState.Deleted | DataRowState.Modified; if ((rowStates & (~allRowStates)) != 0) { throw ExceptionBuilder.ArgumentOutOfRange("rowState"); } for (int i = 0; i < Tables.Count; i++) { DataTable table = Tables[i]; for (int j = 0; j < table.Rows.Count; j++) { DataRow row = table.Rows[j]; if ((row.RowState & rowStates) != 0) { return true; } } } return false; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Infer the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(XmlReader? reader, string[]? 
nsArray) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.InferXmlSchema|API> {0}", ObjectID); try { if (reader == null) { return; } XmlDocument xdoc = new XmlDocument(); if (reader.NodeType == XmlNodeType.Element) { XmlNode node = xdoc.ReadNode(reader)!; xdoc.AppendChild(node); } else { xdoc.Load(reader); } if (xdoc.DocumentElement == null) { return; } InferSchema(xdoc, nsArray, XmlReadMode.InferSchema); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Infer the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(Stream? stream, string[]? nsArray) { if (stream == null) { return; } InferXmlSchema(new XmlTextReader(stream), nsArray); } /// <summary> /// Infer the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(TextReader? reader, string[]? nsArray) { if (reader == null) { return; } InferXmlSchema(new XmlTextReader(reader), nsArray); } /// <summary> /// Infer the XML schema from the specified file into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(string fileName, string[]? nsArray) { XmlTextReader xr = new XmlTextReader(fileName); try { InferXmlSchema(xr, nsArray); } finally { xr.Close(); } } /// <summary> /// Reads the XML schema from the specified <see cref="System.Xml.XmlReader" /> into the <see cref="System.Data.DataSet" /> /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(XmlReader? reader) => ReadXmlSchema(reader, false); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void ReadXmlSchema(XmlReader? reader, bool denyResolving) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXmlSchema|INFO> {0}, reader, denyResolving={1}", ObjectID, denyResolving); try { int iCurrentDepth = -1; if (reader == null) { return; } if (reader is XmlTextReader) { ((XmlTextReader)reader).WhitespaceHandling = WhitespaceHandling.None; } XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema if (reader.NodeType == XmlNodeType.Element) { iCurrentDepth = reader.Depth; } reader.MoveToContent(); if (reader.NodeType == XmlNodeType.Element) { // if reader points to the schema load it... if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } // ... 
otherwise backup the top node and all its attributes XmlElement topNode = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); if (reader.HasAttributes) { int attrCount = reader.AttributeCount; for (int i = 0; i < attrCount; i++) { reader.MoveToAttribute(i); if (reader.NamespaceURI.Equals(Keywords.XSD_XMLNS_NS)) { topNode.SetAttribute(reader.Name, reader.GetAttribute(i)); } else { XmlAttribute attr = topNode.SetAttributeNode(reader.LocalName, reader.NamespaceURI); attr.Prefix = reader.Prefix; attr.Value = reader.GetAttribute(i); } } } reader.Read(); while (MoveToElement(reader, iCurrentDepth)) { // if reader points to the schema load it... if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } XmlNode node = xdoc.ReadNode(reader)!; topNode.AppendChild(node); } // read the closing tag of the current element ReadEndElement(reader); // if we are here no schema has been found xdoc.AppendChild(topNode); // so we InferSchema InferSchema(xdoc, null, XmlReadMode.Auto); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal bool MoveToElement(XmlReader reader, int depth) { while (!reader.EOF && reader.NodeType != XmlNodeType.EndElement && reader.NodeType != XmlNodeType.Element && reader.Depth > depth) { reader.Read(); } return (reader.NodeType == XmlNodeType.Element); } private static void MoveToElement(XmlReader reader) { while (!reader.EOF && reader.NodeType != XmlNodeType.EndElement && reader.NodeType != XmlNodeType.Element) { reader.Read(); } } internal void ReadEndElement(XmlReader reader) { while (reader.NodeType == XmlNodeType.Whitespace) { reader.Skip(); } if (reader.NodeType == XmlNodeType.None) { reader.Skip(); } else if (reader.NodeType == XmlNodeType.EndElement) { reader.ReadEndElement(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void ReadXSDSchema(XmlReader reader, bool denyResolving) { XmlSchemaSet sSet = new XmlSchemaSet(); int schemaFragmentCount = 1; //read from current schmema element if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { if (reader.HasAttributes) { string? 
attribValue = reader.GetAttribute(Keywords.MSD_FRAGMENTCOUNT, Keywords.MSDNS); // this must not move the position if (!string.IsNullOrEmpty(attribValue)) { schemaFragmentCount = int.Parse(attribValue, null); } } } while (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { XmlSchema s = XmlSchema.Read(reader, null)!; sSet.Add(s); //read the end tag ReadEndElement(reader); if (--schemaFragmentCount > 0) { MoveToElement(reader); } while (reader.NodeType == XmlNodeType.Whitespace) { reader.Skip(); } } sSet.Compile(); XSDSchema schema = new XSDSchema(); schema.LoadSchema(sSet, this); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void ReadXDRSchema(XmlReader reader) { XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema XmlNode schNode = xdoc.ReadNode(reader)!; xdoc.AppendChild(schNode); XDRSchema schema = new XDRSchema(this, false); DataSetName = xdoc.DocumentElement!.LocalName; schema.LoadSchema((XmlElement)schNode, this); } /// <summary> /// Reads the XML schema from the specified <see cref='System.IO.Stream'/> into the /// <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(Stream? stream) { if (stream == null) { return; } ReadXmlSchema(new XmlTextReader(stream), false); } /// <summary> /// Reads the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(TextReader? reader) { if (reader == null) { return; } ReadXmlSchema(new XmlTextReader(reader), false); } /// <summary> /// Reads the XML schema from the specified file into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(string fileName) { XmlTextReader xr = new XmlTextReader(fileName); try { ReadXmlSchema(xr, false); } finally { xr.Close(); } } #region WriteXmlSchema /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to using the specified <see cref='Stream'/> object.</summary> /// <param name="stream">A <see cref='Stream'/> object used to write to a file.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(Stream? stream) => WriteXmlSchema(stream, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to using the specified <see cref='Stream'/> object.</summary> /// <param name="stream">A <see cref='Stream'/> object used to write to a file.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(Stream? 
stream, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(stream, SchemaFormat.Public, multipleTargetConverter); } /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a file.</summary> /// <param name="fileName">The file name (including the path) to which to write.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(string fileName) => WriteXmlSchema(fileName, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a file.</summary> /// <param name="fileName">The file name (including the path) to which to write.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(string fileName, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(fileName, SchemaFormat.Public, multipleTargetConverter); } /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a <see cref='TextWriter'/> object.</summary> /// <param name="writer">The <see cref='TextWriter'/> object with which to write.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(TextWriter? writer) => WriteXmlSchema(writer, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a <see cref='TextWriter'/> object.</summary> /// <param name="writer">The <see cref='TextWriter'/> object with which to write.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(TextWriter? writer, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(writer, SchemaFormat.Public, multipleTargetConverter); } /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to an <see cref='XmlWriter'/> object.</summary> /// <param name="writer">The <see cref='XmlWriter'/> object with which to write.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(XmlWriter? writer) => WriteXmlSchema(writer, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to an <see cref='XmlWriter'/> object.</summary> /// <param name="writer">The <see cref='XmlWriter'/> object with which to write.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(XmlWriter? writer, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(writer, SchemaFormat.Public, multipleTargetConverter); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(string fileName, SchemaFormat schemaFormat, Converter<Type, string>? 
multipleTargetConverter) { XmlTextWriter xw = new XmlTextWriter(fileName, null); try { xw.Formatting = Formatting.Indented; xw.WriteStartDocument(true); WriteXmlSchema(xw, schemaFormat, multipleTargetConverter); xw.WriteEndDocument(); } finally { xw.Close(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(Stream? stream, SchemaFormat schemaFormat, Converter<Type, string>? multipleTargetConverter) { if (stream == null) { return; } XmlTextWriter w = new XmlTextWriter(stream, null); w.Formatting = Formatting.Indented; WriteXmlSchema(w, schemaFormat, multipleTargetConverter); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(TextWriter? writer, SchemaFormat schemaFormat, Converter<Type, string>? multipleTargetConverter) { if (writer == null) { return; } XmlTextWriter w = new XmlTextWriter(writer); w.Formatting = Formatting.Indented; WriteXmlSchema(w, schemaFormat, multipleTargetConverter); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(XmlWriter? writer, SchemaFormat schemaFormat, Converter<Type, string>? multipleTargetConverter) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.WriteXmlSchema|INFO> {0}, schemaFormat={1}", ObjectID, schemaFormat); try { // Generate SchemaTree and write it out if (writer != null) { XmlTreeGen treeGen; if (schemaFormat == SchemaFormat.WebService && SchemaSerializationMode == SchemaSerializationMode.ExcludeSchema && writer.WriteState == WriteState.Element) { treeGen = new XmlTreeGen(SchemaFormat.WebServiceSkipSchema); } else { treeGen = new XmlTreeGen(schemaFormat); } treeGen.Save(this, null, writer, false, multipleTargetConverter); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } #endregion [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(XmlReader? reader) => ReadXml(reader, false); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal XmlReadMode ReadXml(XmlReader? reader, bool denyResolving) { IDisposable? restrictedScope = null; long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXml|INFO> {0}, denyResolving={1}", ObjectID, denyResolving); try { restrictedScope = TypeLimiter.EnterRestrictedScope(this); DataTable.DSRowDiffIdUsageSection rowDiffIdUsage = default; try { bool fDataFound = false; bool fSchemaFound = false; bool fDiffsFound = false; bool fIsXdr = false; int iCurrentDepth = -1; XmlReadMode ret = XmlReadMode.Auto; bool isEmptyDataSet = false; bool topNodeIsProcessed = false; // we chanche topnode and there is just one case that we miss to process it // it is : <elem attrib1="Attrib">txt</elem> // clear the hashtable to avoid conflicts between diffgrams, SqlHotFix 782 rowDiffIdUsage.Prepare(this); if (reader == null) { return ret; } if (Tables.Count == 0) { isEmptyDataSet = true; } if (reader is XmlTextReader) { ((XmlTextReader)reader).WhitespaceHandling = WhitespaceHandling.Significant; } XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema XmlDataLoader? 
xmlload = null; reader.MoveToContent(); if (reader.NodeType == XmlNodeType.Element) { iCurrentDepth = reader.Depth; } if (reader.NodeType == XmlNodeType.Element) { if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { ReadXmlDiffgram(reader); // read the closing tag of the current element ReadEndElement(reader); return XmlReadMode.DiffGram; } // if reader points to the schema load it if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); return XmlReadMode.ReadSchema; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); return XmlReadMode.ReadSchema; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } // now either the top level node is a table and we load it through dataReader... // ... or backup the top node and all its attributes because we may need to InferSchema XmlElement topNode = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); if (reader.HasAttributes) { int attrCount = reader.AttributeCount; for (int i = 0; i < attrCount; i++) { reader.MoveToAttribute(i); if (reader.NamespaceURI.Equals(Keywords.XSD_XMLNS_NS)) topNode.SetAttribute(reader.Name, reader.GetAttribute(i)); else { XmlAttribute attr = topNode.SetAttributeNode(reader.LocalName, reader.NamespaceURI); attr.Prefix = reader.Prefix; attr.Value = reader.GetAttribute(i); } } } reader.Read(); string rootNodeSimpleContent = reader.Value; while (MoveToElement(reader, iCurrentDepth)) { if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { ReadXmlDiffgram(reader); // read the closing tag of the current element // YUKON FIX ReadEndElement(reader); // return XmlReadMode.DiffGram; ret = XmlReadMode.DiffGram; // continue reading for multiple schemas } // if reader points to the schema load it... if (!fSchemaFound && !fDataFound && reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); fSchemaFound = true; fIsXdr = true; continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); fSchemaFound = true; continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { ReadXmlDiffgram(reader); fDiffsFound = true; ret = XmlReadMode.DiffGram; } else { // We have found data IFF the reader.NodeType == Element and reader.depth == currentDepth-1 // if reader.NodeType == whitespace, skip all whitespace. // skip processing i.e. continue if the first non-whitespace node is not of type element. 
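// Caller-facing sketch (hypothetical file name): this data-loading branch is
// what makes a schemaless document come back as XmlReadMode.InferSchema, while
// a document carrying an inline XSD comes back as XmlReadMode.ReadSchema:
//   DataSet ds = new DataSet();
//   XmlReadMode mode = ds.ReadXml("data.xml"); // InferSchema when no schema is embedded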
while (!reader.EOF && reader.NodeType == XmlNodeType.Whitespace) reader.Read();
if (reader.NodeType != XmlNodeType.Element) continue;
// we found data here
fDataFound = true;
if (!fSchemaFound && Tables.Count == 0) {
    XmlNode node = xdoc.ReadNode(reader)!;
    topNode.AppendChild(node);
} else {
    if (xmlload == null) {
        xmlload = new XmlDataLoader(this, fIsXdr, topNode, false);
    }
    xmlload.LoadData(reader);
    topNodeIsProcessed = true; // we process the topnode
    if (fSchemaFound) {
        ret = XmlReadMode.ReadSchema;
    } else {
        ret = XmlReadMode.IgnoreSchema;
    }
}
}
}
// read the closing tag of the current element
ReadEndElement(reader);
bool isfTopLevelTableSet = false;
bool tmpValue = _fTopLevelTable;
// while inferring we ignore the root element's text content
if (!fSchemaFound && Tables.Count == 0 && !topNode.HasChildNodes) {
    // we should not add the simple content of the root element to topNode if we are not inferring
    _fTopLevelTable = true;
    isfTopLevelTableSet = true;
    if ((rootNodeSimpleContent != null && rootNodeSimpleContent.Length > 0)) {
        topNode.InnerText = rootNodeSimpleContent;
    }
}
if (!isEmptyDataSet) {
    if ((rootNodeSimpleContent != null && rootNodeSimpleContent.Length > 0)) {
        topNode.InnerText = rootNodeSimpleContent;
    }
}
// now top node contains the data part
xdoc.AppendChild(topNode);
if (xmlload == null) {
    xmlload = new XmlDataLoader(this, fIsXdr, topNode, false);
}
if (!isEmptyDataSet && !topNodeIsProcessed) {
    XmlElement root = xdoc.DocumentElement!;
    Debug.Assert(root.NamespaceURI != null, "root.NamespaceURI should not be null, it should be an empty string");
    // just recognize that the XML below represents a datatable at the top level
    // <table attr1="foo" attr2="bar" table_Text="junk">text</table>
    // only allow a root element with simple content, if any
    if (root.ChildNodes.Count == 0 || ((root.ChildNodes.Count == 1) && root.FirstChild!.GetType() == typeof(System.Xml.XmlText))) {
        bool initfTopLevelTable = _fTopLevelTable;
        // if the root element maps to a datatable:
        // ds and dt can't have the same name and ns at the same time, otherwise we couldn't write this to xml
        if (DataSetName != root.Name && _namespaceURI != root.NamespaceURI && Tables.Contains(root.Name, (root.NamespaceURI.Length == 0) ? null : root.NamespaceURI, false, true)) {
            _fTopLevelTable = true;
        }
        try {
            xmlload.LoadData(xdoc);
        } finally {
            // this is not for inference; we had a schema and were skipping the topnode
            // where it was a datatable, so we must restore the value
            _fTopLevelTable = initfTopLevelTable;
        }
    }
}
// the check above and the check below are orthogonal, so we InferSchema
if (!fDiffsFound) {
    // Load Data
    if (!fSchemaFound && Tables.Count == 0) {
        InferSchema(xdoc, null, XmlReadMode.Auto);
        ret = XmlReadMode.InferSchema;
        xmlload.FromInference = true;
        try {
            xmlload.LoadData(xdoc);
        } finally {
            xmlload.FromInference = false;
        }
    }
    // we don't need this assignment; once we set it (during inference), it won't be changed
    if (isfTopLevelTableSet) _fTopLevelTable = tmpValue;
}
}
return ret;
} finally {
    rowDiffIdUsage.Cleanup();
}
} finally {
    restrictedScope?.Dispose();
    DataCommonEventSource.Log.ExitScope(logScopeId);
}
}
[RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)]
public XmlReadMode ReadXml(Stream? stream) {
    if (stream == null) {
        return XmlReadMode.Auto;
    }
    XmlTextReader xr = new XmlTextReader(stream);
    // Prevent Dtd entity in dataset
    xr.XmlResolver = null;
    return ReadXml(xr, false);
}
[RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)]
public XmlReadMode ReadXml(TextReader?
reader) { if (reader == null) { return XmlReadMode.Auto; } XmlTextReader xr = new XmlTextReader(reader); // Prevent Dtd entity in dataset xr.XmlResolver = null; return ReadXml(xr, false); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(string fileName) { XmlTextReader xr = new XmlTextReader(fileName); // Prevent Dtd entity in dataset xr.XmlResolver = null; try { return ReadXml(xr, false); } finally { xr.Close(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void InferSchema(XmlDocument xdoc, string[]? excludedNamespaces, XmlReadMode mode) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.InferSchema|INFO> {0}, mode={1}", ObjectID, mode); try { if (null == excludedNamespaces) { excludedNamespaces = Array.Empty<string>(); } XmlNodeReader xnr = new XmlIgnoreNamespaceReader(xdoc, excludedNamespaces); XmlSchemaInference infer = new XmlSchemaInference(); infer.Occurrence = XmlSchemaInference.InferenceOption.Relaxed; infer.TypeInference = (mode == XmlReadMode.InferTypedSchema) ? XmlSchemaInference.InferenceOption.Restricted : XmlSchemaInference.InferenceOption.Relaxed; XmlSchemaSet schemaSet = infer.InferSchema(xnr); schemaSet.Compile(); XSDSchema schema = new XSDSchema(); schema.FromInference = true; try { schema.LoadSchema(schemaSet, this); } finally { schema.FromInference = false; // this is always false if you are not calling fron inference } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } private bool IsEmpty() { foreach (DataTable table in Tables) { if (table.Rows.Count > 0) { return false; } } return true; } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void ReadXmlDiffgram(XmlReader reader) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXmlDiffgram|INFO> {0}", ObjectID); try { int d = reader.Depth; bool fEnforce = EnforceConstraints; EnforceConstraints = false; DataSet newDs; bool isEmpty = IsEmpty(); if (isEmpty) { newDs = this; } else { newDs = Clone(); newDs.EnforceConstraints = false; } foreach (DataTable t in newDs.Tables) { t.Rows._nullInList = 0; } reader.MoveToContent(); if ((reader.LocalName != Keywords.DIFFGRAM) && (reader.NamespaceURI != Keywords.DFFNS)) { return; } reader.Read(); if (reader.NodeType == XmlNodeType.Whitespace) { MoveToElement(reader, reader.Depth - 1 /*iCurrentDepth*/); // skip over whitespace. } newDs._fInLoadDiffgram = true; if (reader.Depth > d) { if ((reader.NamespaceURI != Keywords.DFFNS) && (reader.NamespaceURI != Keywords.MSDNS)) { //we should be inside the dataset part XmlDocument xdoc = new XmlDocument(); XmlElement node = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); reader.Read(); if (reader.NodeType == XmlNodeType.Whitespace) { MoveToElement(reader, reader.Depth - 1 /*iCurrentDepth*/); // skip over whitespace. } if (reader.Depth - 1 > d) { XmlDataLoader xmlload = new XmlDataLoader(newDs, false, node, false); xmlload._isDiffgram = true; // turn on the special processing xmlload.LoadData(reader); } ReadEndElement(reader); if (reader.NodeType == XmlNodeType.Whitespace) { MoveToElement(reader, reader.Depth - 1 /*iCurrentDepth*/); // skip over whitespace. 
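// Annotation: at this point the data portion of the <diffgr:diffgram> has been
// loaded into newDs; what may still follow are the <diffgr:before> and
// <diffgr:errors> sections, which the XMLDiffLoader below consumes to restore
// original row versions and per-row errors.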
} } Debug.Assert(reader.NodeType != XmlNodeType.Whitespace, "Should not be on Whitespace node"); if (((reader.LocalName == Keywords.SQL_BEFORE) && (reader.NamespaceURI == Keywords.DFFNS)) || ((reader.LocalName == Keywords.MSD_ERRORS) && (reader.NamespaceURI == Keywords.DFFNS))) { //this will consume the changes and the errors part XMLDiffLoader diffLoader = new XMLDiffLoader(); diffLoader.LoadDiffGram(newDs, reader); } // get to the closing diff tag while (reader.Depth > d) { reader.Read(); } // read the closing tag ReadEndElement(reader); } foreach (DataTable t in newDs.Tables) { if (t.Rows._nullInList > 0) { throw ExceptionBuilder.RowInsertMissing(t.TableName); } } newDs._fInLoadDiffgram = false; //terrible performance! foreach (DataTable t in newDs.Tables) { DataRelation[] nestedParentRelations = t.NestedParentRelations; foreach (DataRelation rel in nestedParentRelations) { if (rel.ParentTable == t) { foreach (DataRow r in t.Rows) { foreach (DataRelation rel2 in nestedParentRelations) { r.CheckForLoops(rel2); } } } } } if (!isEmpty) { Merge(newDs); if (_dataSetName == "NewDataSet") { _dataSetName = newDs._dataSetName; } newDs.EnforceConstraints = fEnforce; } EnforceConstraints = fEnforce; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(XmlReader? reader, XmlReadMode mode) => ReadXml(reader, mode, false); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal XmlReadMode ReadXml(XmlReader? reader, XmlReadMode mode, bool denyResolving) { IDisposable? restictedScope = null; long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXml|INFO> {0}, mode={1}, denyResolving={2}", ObjectID, mode, denyResolving); try { restictedScope = TypeLimiter.EnterRestrictedScope(this); XmlReadMode ret = mode; if (reader == null) { return ret; } if (mode == XmlReadMode.Auto) { // nested ReadXml calls on the same DataSet must be done outside of RowDiffIdUsage scope return ReadXml(reader); } DataTable.DSRowDiffIdUsageSection rowDiffIdUsage = default; try { bool fSchemaFound = false; bool fDataFound = false; bool fIsXdr = false; int iCurrentDepth = -1; // prepare and cleanup rowDiffId hashtable rowDiffIdUsage.Prepare(this); if (reader is XmlTextReader) { ((XmlTextReader)reader).WhitespaceHandling = WhitespaceHandling.Significant; } XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema if ((mode != XmlReadMode.Fragment) && (reader.NodeType == XmlNodeType.Element)) { iCurrentDepth = reader.Depth; } reader.MoveToContent(); XmlDataLoader? xmlload = null; if (reader.NodeType == XmlNodeType.Element) { XmlElement? 
topNode = null; if (mode == XmlReadMode.Fragment) { xdoc.AppendChild(xdoc.CreateElement("ds_sqlXmlWraPPeR")); topNode = xdoc.DocumentElement!; } else { //handle the top node if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { if ((mode == XmlReadMode.DiffGram) || (mode == XmlReadMode.IgnoreSchema)) { ReadXmlDiffgram(reader); // read the closing tag of the current element ReadEndElement(reader); } else { reader.Skip(); } return ret; } if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit if ((mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXDRSchema(reader); } else { reader.Skip(); } return ret; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit if ((mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXSDSchema(reader, denyResolving); } else { reader.Skip(); } return ret; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } // now either the top level node is a table and we load it through dataReader... // ... or backup the top node and all its attributes topNode = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); if (reader.HasAttributes) { int attrCount = reader.AttributeCount; for (int i = 0; i < attrCount; i++) { reader.MoveToAttribute(i); if (reader.NamespaceURI.Equals(Keywords.XSD_XMLNS_NS)) topNode.SetAttribute(reader.Name, reader.GetAttribute(i)); else { XmlAttribute attr = topNode.SetAttributeNode(reader.LocalName, reader.NamespaceURI); attr.Prefix = reader.Prefix; attr.Value = reader.GetAttribute(i); } } } reader.Read(); } while (MoveToElement(reader, iCurrentDepth)) { if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema if (!fSchemaFound && !fDataFound && (mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXDRSchema(reader); fSchemaFound = true; fIsXdr = true; } else { reader.Skip(); } continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit if ((mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXSDSchema(reader, denyResolving); fSchemaFound = true; } else { reader.Skip(); } continue; } if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { if ((mode == XmlReadMode.DiffGram) || (mode == XmlReadMode.IgnoreSchema)) { ReadXmlDiffgram(reader); ret = XmlReadMode.DiffGram; } else { reader.Skip(); } continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); if (mode == XmlReadMode.DiffGram) { reader.Skip(); continue; // we do not read data in diffgram mode } // if we are here we found some data fDataFound = true; if (mode == XmlReadMode.InferSchema || mode == XmlReadMode.InferTypedSchema) { //save the node in DOM until the end; XmlNode node = xdoc.ReadNode(reader)!; 
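// Hedged usage sketch for the inference path handled here: in the
// InferSchema/InferTypedSchema modes the data cannot be loaded as it is read,
// because no tables exist yet, so elements are buffered into the DOM and the
// schema is inferred from the completed document further below. For example
// (the file name is hypothetical):
//
//   var ds = new DataSet();
//   ds.ReadXml("orders.xml", XmlReadMode.InferSchema);
//   // tables and columns are created from the shape of the data itself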
topNode.AppendChild(node); } else { if (xmlload == null) { xmlload = new XmlDataLoader(this, fIsXdr, topNode, mode == XmlReadMode.IgnoreSchema); } xmlload.LoadData(reader); } } //end of the while // read the closing tag of the current element ReadEndElement(reader); // now top node contains the data part xdoc.AppendChild(topNode); if (xmlload == null) xmlload = new XmlDataLoader(this, fIsXdr, mode == XmlReadMode.IgnoreSchema); if (mode == XmlReadMode.DiffGram) { // we already got the diffs through XmlReader interface return ret; } // Load Data if (mode == XmlReadMode.InferSchema || mode == XmlReadMode.InferTypedSchema) { InferSchema(xdoc, null, mode); ret = XmlReadMode.InferSchema; xmlload.FromInference = true; try { xmlload.LoadData(xdoc); } finally { xmlload.FromInference = false; } } } return ret; } finally { // prepare and cleanup rowDiffId hashtable rowDiffIdUsage.Cleanup(); } } finally { restictedScope?.Dispose(); DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(Stream? stream, XmlReadMode mode) { if (stream == null) { return XmlReadMode.Auto; } XmlTextReader reader = (mode == XmlReadMode.Fragment) ? new XmlTextReader(stream, XmlNodeType.Element, null) : new XmlTextReader(stream); // Prevent Dtd entity in dataset reader.XmlResolver = null; return ReadXml(reader, mode, false); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(TextReader? reader, XmlReadMode mode) { if (reader == null) { return XmlReadMode.Auto; } XmlTextReader xmlreader = (mode == XmlReadMode.Fragment) ? new XmlTextReader(reader.ReadToEnd(), XmlNodeType.Element, null) : new XmlTextReader(reader); // Prevent Dtd entity in dataset xmlreader.XmlResolver = null; return ReadXml(xmlreader, mode, false); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(string fileName, XmlReadMode mode) { XmlTextReader xr; if (mode == XmlReadMode.Fragment) { FileStream stream = new FileStream(fileName, FileMode.Open); xr = new XmlTextReader(stream, XmlNodeType.Element, null); } else { xr = new XmlTextReader(fileName); } // Prevent Dtd entity in dataset xr.XmlResolver = null; try { return ReadXml(xr, mode, false); } finally { xr.Close(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(Stream? stream) => WriteXml(stream, XmlWriteMode.IgnoreSchema); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(TextWriter? writer) => WriteXml(writer, XmlWriteMode.IgnoreSchema); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(XmlWriter? writer) => WriteXml(writer, XmlWriteMode.IgnoreSchema); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(string fileName) => WriteXml(fileName, XmlWriteMode.IgnoreSchema); /// <summary> /// Writes schema and data for the DataSet. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(Stream? stream, XmlWriteMode mode) { if (stream != null) { XmlTextWriter w = new XmlTextWriter(stream, null); w.Formatting = Formatting.Indented; WriteXml(w, mode); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(TextWriter? writer, XmlWriteMode mode) { if (writer != null) { XmlTextWriter w = new XmlTextWriter(writer); w.Formatting = Formatting.Indented; WriteXml(w, mode); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(XmlWriter? 
writer, XmlWriteMode mode) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.WriteXml|API> {0}, mode={1}", ObjectID, mode); try { // Generate SchemaTree and write it out if (writer != null) { if (mode == XmlWriteMode.DiffGram) { // Create and save the updates new NewDiffgramGen(this).Save(writer); } else { // Create and save xml data new XmlDataTreeWriter(this).Save(writer, mode == XmlWriteMode.WriteSchema); } } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(string fileName, XmlWriteMode mode) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.WriteXml|API> {0}, fileName='{1}', mode={2}", ObjectID, fileName, (int)mode); XmlTextWriter xw = new XmlTextWriter(fileName, null); try { xw.Formatting = Formatting.Indented; xw.WriteStartDocument(true); // Create and save the updates if (mode == XmlWriteMode.DiffGram) { new NewDiffgramGen(this).Save(xw); } else { // Create and save xml data new XmlDataTreeWriter(this).Save(xw, mode == XmlWriteMode.WriteSchema); } xw.WriteEndDocument(); } finally { xw.Close(); DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Gets the collection of parent relations which belong to a /// specified table. /// </summary> internal DataRelationCollection GetParentRelations(DataTable table) => table.ParentRelations; /// <summary> /// Merges this <see cref='System.Data.DataSet'/> into a specified <see cref='System.Data.DataSet'/>. /// </summary> public void Merge(DataSet dataSet) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, dataSet={1}", ObjectID, (dataSet != null) ? dataSet.ObjectID : 0); Debug.Assert(dataSet != null); try { Merge(dataSet, false, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataSet'/> into a specified <see cref='System.Data.DataSet'/> preserving changes according to /// the specified argument. /// </summary> public void Merge(DataSet dataSet, bool preserveChanges) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, dataSet={1}, preserveChanges={2}", ObjectID, (dataSet != null) ? dataSet.ObjectID : 0, preserveChanges); Debug.Assert(dataSet != null); try { Merge(dataSet, preserveChanges, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataSet'/> into a specified <see cref='System.Data.DataSet'/> preserving changes according to /// the specified argument, and handling an incompatible schema according to the /// specified argument. /// </summary> public void Merge(DataSet dataSet, bool preserveChanges, MissingSchemaAction missingSchemaAction) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, dataSet={1}, preserveChanges={2}, missingSchemaAction={3}", ObjectID, (dataSet != null) ? 
dataSet.ObjectID : 0, preserveChanges, missingSchemaAction); try { // Argument checks if (dataSet == null) { throw ExceptionBuilder.ArgumentNull(nameof(dataSet)); } switch (missingSchemaAction) { case MissingSchemaAction.Add: case MissingSchemaAction.Ignore: case MissingSchemaAction.Error: case MissingSchemaAction.AddWithKey: Merger merger = new Merger(this, preserveChanges, missingSchemaAction); merger.MergeDataSet(dataSet); break; default: throw ADP.InvalidMissingSchemaAction(missingSchemaAction); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataTable'/> into a specified <see cref='System.Data.DataTable'/>. /// </summary> public void Merge(DataTable table) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, table={1}", ObjectID, (table != null) ? table.ObjectID : 0); Debug.Assert(table != null); try { Merge(table, false, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataTable'/> into a specified <see cref='System.Data.DataTable'/>. with a value to preserve changes /// made to the target, and a value to deal with missing schemas. /// </summary> public void Merge(DataTable table, bool preserveChanges, MissingSchemaAction missingSchemaAction) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, table={1}, preserveChanges={2}, missingSchemaAction={3}", ObjectID, (table != null) ? table.ObjectID : 0, preserveChanges, missingSchemaAction); try { // Argument checks if (table == null) { throw ExceptionBuilder.ArgumentNull(nameof(table)); } switch (missingSchemaAction) { case MissingSchemaAction.Add: case MissingSchemaAction.Ignore: case MissingSchemaAction.Error: case MissingSchemaAction.AddWithKey: Merger merger = new Merger(this, preserveChanges, missingSchemaAction); merger.MergeTable(table); break; default: throw ADP.InvalidMissingSchemaAction(missingSchemaAction); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } public void Merge(DataRow[] rows) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, rows", ObjectID); try { Merge(rows, false, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } public void Merge(DataRow[] rows, bool preserveChanges, MissingSchemaAction missingSchemaAction) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, preserveChanges={1}, missingSchemaAction={2}", ObjectID, preserveChanges, missingSchemaAction); try { // Argument checks if (rows == null) { throw ExceptionBuilder.ArgumentNull(nameof(rows)); } switch (missingSchemaAction) { case MissingSchemaAction.Add: case MissingSchemaAction.Ignore: case MissingSchemaAction.Error: case MissingSchemaAction.AddWithKey: Merger merger = new Merger(this, preserveChanges, missingSchemaAction); merger.MergeRows(rows); break; default: throw ADP.InvalidMissingSchemaAction(missingSchemaAction); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } protected virtual void OnPropertyChanging(PropertyChangedEventArgs pcevent) { PropertyChanging?.Invoke(this, pcevent); } /// <summary> /// Inheriting classes should override this method to handle this event. /// Call base.OnMergeFailed to send this event to any registered event /// listeners. 
/// </summary>
internal void OnMergeFailed(MergeFailedEventArgs mfevent)
{
    if (MergeFailed != null) {
        MergeFailed(this, mfevent);
    } else {
        throw ExceptionBuilder.MergeFailed(mfevent.Conflict);
    }
}
internal void RaiseMergeFailed(DataTable? table, string conflict, MissingSchemaAction missingSchemaAction)
{
    if (MissingSchemaAction.Error == missingSchemaAction) {
        throw ExceptionBuilder.MergeFailed(conflict);
    }
    OnMergeFailed(new MergeFailedEventArgs(table, conflict));
}
internal void OnDataRowCreated(DataRow row) => DataRowCreated?.Invoke(this, row);
internal void OnClearFunctionCalled(DataTable? table) => ClearFunctionCalled?.Invoke(this, table);
private void OnInitialized() => Initialized?.Invoke(this, EventArgs.Empty);
/// <summary>
/// This method should be overridden by subclasses to restrict tables being removed.
/// </summary>
protected internal virtual void OnRemoveTable(DataTable table) { }
internal void OnRemovedTable(DataTable table)
{
    DataViewManager? viewManager = _defaultViewManager;
    if (null != viewManager) {
        viewManager.DataViewSettings.Remove(table);
    }
}
/// <summary>
/// This method should be overridden by subclasses to restrict relations being removed.
/// </summary>
protected virtual void OnRemoveRelation(DataRelation relation) { }
internal void OnRemoveRelationHack(DataRelation relation) => OnRemoveRelation(relation);
protected internal void RaisePropertyChanging(string name) => OnPropertyChanging(new PropertyChangedEventArgs(name));
internal DataTable[] TopLevelTables() => TopLevelTables(false);
internal DataTable[] TopLevelTables(bool forSchema)
{
    // first let's figure out if we can represent the given dataSet as a tree, using
    // the fact that all connected undirected graphs with n-1 edges are trees.
    List<DataTable> topTables = new List<DataTable>();
    if (forSchema) {
        // prepend the tables that are nested more than once
        for (int i = 0; i < Tables.Count; i++) {
            DataTable table = Tables[i];
            if (table.NestedParentsCount > 1 || table.SelfNested) {
                topTables.Add(table);
            }
        }
    }
    for (int i = 0; i < Tables.Count; i++) {
        DataTable table = Tables[i];
        if (table.NestedParentsCount == 0 && !topTables.Contains(table)) {
            topTables.Add(table);
        }
    }
    return topTables.Count == 0 ? Array.Empty<DataTable>() : topTables.ToArray();
}
/// <summary>
/// This method rolls back all the changes that have been made to this DataSet since
/// it was loaded or the last time AcceptChanges was called.
/// Any rows still in edit mode cancel their edits. New rows get removed. Modified and
/// Deleted rows return back to their original state.
/// </summary>
public virtual void RejectChanges()
{
    long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.RejectChanges|API> {0}", ObjectID);
    try {
        bool fEnforce = EnforceConstraints;
        EnforceConstraints = false;
        for (int i = 0; i < Tables.Count; i++) {
            Tables[i].RejectChanges();
        }
        EnforceConstraints = fEnforce;
    } finally {
        DataCommonEventSource.Log.ExitScope(logScopeId);
    }
}
/// <summary>
/// Resets the DataSet back to its original state. Subclasses should override
/// to restore it back to its original state.
/// </summary> public virtual void Reset() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Reset|API> {0}", ObjectID); try { for (int i = 0; i < Tables.Count; i++) { ConstraintCollection cons = Tables[i].Constraints; for (int j = 0; j < cons.Count;) { if (cons[j] is ForeignKeyConstraint) { cons.Remove(cons[j]); } else { j++; } } } Clear(); Relations.Clear(); Tables.Clear(); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal bool ValidateCaseConstraint() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ValidateCaseConstraint|INFO> {0}", ObjectID); try { DataRelation? relation = null; for (int i = 0; i < Relations.Count; i++) { relation = Relations[i]; if (relation.ChildTable.CaseSensitive != relation.ParentTable.CaseSensitive) { return false; } } ForeignKeyConstraint? constraint; ConstraintCollection? constraints; for (int i = 0; i < Tables.Count; i++) { constraints = Tables[i].Constraints; for (int j = 0; j < constraints.Count; j++) { if (constraints[j] is ForeignKeyConstraint) { constraint = (ForeignKeyConstraint)constraints[j]; if (constraint.Table!.CaseSensitive != constraint.RelatedTable.CaseSensitive) { return false; } } } } return true; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal bool ValidateLocaleConstraint() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ValidateLocaleConstraint|INFO> {0}", ObjectID); try { DataRelation? relation = null; for (int i = 0; i < Relations.Count; i++) { relation = Relations[i]; if (relation.ChildTable.Locale.LCID != relation.ParentTable.Locale.LCID) { return false; } } ForeignKeyConstraint? constraint; ConstraintCollection? constraints; for (int i = 0; i < Tables.Count; i++) { constraints = Tables[i].Constraints; for (int j = 0; j < constraints.Count; j++) { if (constraints[j] is ForeignKeyConstraint) { constraint = (ForeignKeyConstraint)constraints[j]; if (constraint.Table!.Locale.LCID != constraint.RelatedTable.Locale.LCID) { return false; } } } } return true; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } // SDUB: may be better to rewrite this as nonrecursive? internal DataTable? FindTable(DataTable? baseTable, PropertyDescriptor[] props, int propStart) { if (props.Length < propStart + 1) { return baseTable; } PropertyDescriptor currentProp = props[propStart]; if (baseTable == null) { // the accessor is the table name. if we don't find it, return null. if (currentProp is DataTablePropertyDescriptor) { return FindTable(((DataTablePropertyDescriptor)currentProp).Table, props, propStart + 1); } return null; } if (currentProp is DataRelationPropertyDescriptor) { return FindTable(((DataRelationPropertyDescriptor)currentProp).Relation.ChildTable, props, propStart + 1); } return null; } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] protected virtual void ReadXmlSerializable(XmlReader reader) { // <DataSet xsi:nil="true"> does not mean DataSet is null,but it does not have any child // so dont do anything, ignore the attributes and just return empty DataSet; _useDataSetSchemaOnly = false; _udtIsWrapped = false; if (reader.HasAttributes) { const string xsinill = Keywords.XSI + ":" + Keywords.XSI_NIL; if (reader.MoveToAttribute(xsinill)) { string? 
nilAttrib = reader.GetAttribute(xsinill); if (string.Equals(nilAttrib, "true", StringComparison.Ordinal)) { // case sensitive true comparison MoveToElement(reader, 1); return; } } const string UseDataSetSchemaOnlyString = Keywords.MSD + ":" + Keywords.USEDATASETSCHEMAONLY; if (reader.MoveToAttribute(UseDataSetSchemaOnlyString)) { string? useDataSetSchemaOnly = reader.GetAttribute(UseDataSetSchemaOnlyString); if (string.Equals(useDataSetSchemaOnly, "true", StringComparison.Ordinal) || string.Equals(useDataSetSchemaOnly, "1", StringComparison.Ordinal)) { _useDataSetSchemaOnly = true; } else if (!string.Equals(useDataSetSchemaOnly, "false", StringComparison.Ordinal) && !string.Equals(useDataSetSchemaOnly, "0", StringComparison.Ordinal)) { throw ExceptionBuilder.InvalidAttributeValue(Keywords.USEDATASETSCHEMAONLY, useDataSetSchemaOnly!); } } const string udtIsWrappedString = Keywords.MSD + ":" + Keywords.UDTCOLUMNVALUEWRAPPED; if (reader.MoveToAttribute(udtIsWrappedString)) { string? _udtIsWrappedString = reader.GetAttribute(udtIsWrappedString); if (string.Equals(_udtIsWrappedString, "true", StringComparison.Ordinal) || string.Equals(_udtIsWrappedString, "1", StringComparison.Ordinal)) { _udtIsWrapped = true; } else if (!string.Equals(_udtIsWrappedString, "false", StringComparison.Ordinal) && !string.Equals(_udtIsWrappedString, "0", StringComparison.Ordinal)) { throw ExceptionBuilder.InvalidAttributeValue(Keywords.UDTCOLUMNVALUEWRAPPED, _udtIsWrappedString!); } } } ReadXml(reader, XmlReadMode.DiffGram, true); } protected virtual System.Xml.Schema.XmlSchema? GetSchemaSerializable() => null; public static XmlSchemaComplexType GetDataSetSchema(XmlSchemaSet? schemaSet) { // For performance reasons we are exploiting the fact that config files content is constant // for a given appdomain so we can safely cache the prepared schema complex type and reuse it if (s_schemaTypeForWSDL == null) { // to change the config file, appdomain needs to restart; so it seems safe to cache the schema XmlSchemaComplexType tempWSDL = new XmlSchemaComplexType(); XmlSchemaSequence sequence = new XmlSchemaSequence(); XmlSchemaAny any = new XmlSchemaAny(); any.Namespace = XmlSchema.Namespace; any.MinOccurs = 0; any.ProcessContents = XmlSchemaContentProcessing.Lax; sequence.Items.Add(any); any = new XmlSchemaAny(); any.Namespace = Keywords.DFFNS; any.MinOccurs = 0; // when recognizing WSDL - MinOccurs="0" denotes DataSet, a MinOccurs="1" for DataTable any.ProcessContents = XmlSchemaContentProcessing.Lax; sequence.Items.Add(any); sequence.MaxOccurs = decimal.MaxValue; tempWSDL.Particle = sequence; s_schemaTypeForWSDL = tempWSDL; } return s_schemaTypeForWSDL; } private static bool PublishLegacyWSDL() => false; XmlSchema? IXmlSerializable.GetSchema() { if (GetType() == typeof(DataSet)) { return null; } MemoryStream stream = new MemoryStream(); // WriteXmlSchema(new XmlTextWriter(stream, null)); XmlWriter writer = new XmlTextWriter(stream, null); if (writer != null) { #pragma warning disable IL2026 // suppressed in ILLink.Suppressions.LibraryBuild.xml WriteXmlSchema(this, writer); #pragma warning restore IL2026 } stream.Position = 0; return XmlSchema.Read(new XmlTextReader(stream), null); } [RequiresUnreferencedCode("DataSet.GetSchema uses TypeDescriptor and XmlSerialization underneath which are not trimming safe. 
Members from serialized types may be trimmed if not referenced directly.")] private static void WriteXmlSchema(DataSet ds, XmlWriter writer) { (new XmlTreeGen(SchemaFormat.WebService)).Save(ds, writer); } void IXmlSerializable.ReadXml(XmlReader reader) { bool fNormalization = true; XmlTextReader? xmlTextReader = null; IXmlTextParser? xmlTextParser = reader as IXmlTextParser; if (xmlTextParser != null) { fNormalization = xmlTextParser.Normalized; xmlTextParser.Normalized = false; } else { xmlTextReader = reader as XmlTextReader; if (xmlTextReader != null) { fNormalization = xmlTextReader.Normalization; xmlTextReader.Normalization = false; } } #pragma warning disable IL2026 // suppressed in ILLink.Suppressions.LibraryBuild.xml ReadXmlSerializableInternal(reader); #pragma warning restore IL2026 if (xmlTextParser != null) { xmlTextParser.Normalized = fNormalization; } else if (xmlTextReader != null) { xmlTextReader.Normalization = fNormalization; } } [RequiresUnreferencedCode("DataSet.ReadXml uses XmlSerialization underneath which is not trimming safe. Members from serialized types may be trimmed if not referenced directly.")] private void ReadXmlSerializableInternal(XmlReader reader) { ReadXmlSerializable(reader); } void IXmlSerializable.WriteXml(XmlWriter writer) { #pragma warning disable IL2026 // suppressed in ILLink.Suppressions.LibraryBuild.xml WriteXmlInternal(writer); #pragma warning restore IL2026 } [RequiresUnreferencedCode("DataSet.WriteXml uses XmlSerialization underneath which is not trimming safe. Members from serialized types may be trimmed if not referenced directly.")] private void WriteXmlInternal(XmlWriter writer) { WriteXmlSchema(writer, SchemaFormat.WebService, null); WriteXml(writer, XmlWriteMode.DiffGram); } [RequiresUnreferencedCode("Using LoadOption may cause members from types used in the expression column to be trimmed if not referenced directly.")] public virtual void Load(IDataReader reader, LoadOption loadOption, FillErrorEventHandler? errorHandler, params DataTable[] tables) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Load|API> reader, loadOption={0}", loadOption); try { foreach (DataTable dt in tables) { ADP.CheckArgumentNull(dt, nameof(tables)); if (dt.DataSet != this) { throw ExceptionBuilder.TableNotInTheDataSet(dt.TableName); } } var adapter = new LoadAdapter(); adapter.FillLoadOption = loadOption; adapter.MissingSchemaAction = MissingSchemaAction.AddWithKey; if (null != errorHandler) { adapter.FillError += errorHandler; } adapter.FillFromReader(tables, reader, 0, 0); if (!reader.IsClosed && !reader.NextResult()) { reader.Close(); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode("Using LoadOption may cause members from types used in the expression column to be trimmed if not referenced directly.")] public void Load(IDataReader reader, LoadOption loadOption, params DataTable[] tables) => Load(reader, loadOption, null, tables); [RequiresUnreferencedCode("Using LoadOption may cause members from types used in the expression column to be trimmed if not referenced directly.")] public void Load(IDataReader reader, LoadOption loadOption, params string[] tables) { ADP.CheckArgumentNull(tables, nameof(tables)); var dataTables = new DataTable[tables.Length]; for (int i = 0; i < tables.Length; i++) { DataTable? 
tempDT = Tables[tables[i]]; if (null == tempDT) { tempDT = new DataTable(tables[i]); Tables.Add(tempDT); } dataTables[i] = tempDT; } Load(reader, loadOption, null, dataTables); } public DataTableReader CreateDataReader() { if (Tables.Count == 0) { throw ExceptionBuilder.CannotCreateDataReaderOnEmptyDataSet(); } var dataTables = new DataTable[Tables.Count]; for (int i = 0; i < Tables.Count; i++) { dataTables[i] = Tables[i]; } return CreateDataReader(dataTables); } public DataTableReader CreateDataReader(params DataTable[] dataTables) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetDataReader|API> {0}", ObjectID); try { if (dataTables.Length == 0) { throw ExceptionBuilder.DataTableReaderArgumentIsEmpty(); } for (int i = 0; i < dataTables.Length; i++) { if (dataTables[i] == null) { throw ExceptionBuilder.ArgumentContainsNullValue(); } } return new DataTableReader(dataTables); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal string MainTableName { get { return _mainTableName; } set { _mainTableName = value; } } internal int ObjectID => _objectID; } }
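// Hedged usage sketch for CreateDataReader above, assuming ds is a DataSet
// that already holds at least one populated table:
//
//   using (DataTableReader r = ds.CreateDataReader())
//   {
//       do
//       {
//           while (r.Read()) { /* consume the current table's rows */ }
//       } while (r.NextResult()); // advances to the next DataTable
//   }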
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; using System.ComponentModel; using System.Data.Common; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Globalization; using System.IO; using System.Runtime.CompilerServices; using System.Runtime.Serialization; using System.Runtime.Serialization.Formatters.Binary; using System.Text; using System.Threading; using System.Xml; using System.Xml.Schema; using System.Xml.Serialization; namespace System.Data { /// <summary> /// Represents an in-memory cache of data. /// </summary> [Designer("Microsoft.VSDesigner.Data.VS.DataSetDesigner, Microsoft.VSDesigner, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a")] [DefaultProperty(nameof(DataSetName))] [Serializable] [ToolboxItem("Microsoft.VSDesigner.Data.VS.DataSetToolboxItem, Microsoft.VSDesigner, Version=10.0.0.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a")] [XmlSchemaProvider(nameof(GetDataSetSchema))] [XmlRoot(nameof(DataSet))] [System.Runtime.CompilerServices.TypeForwardedFrom("System.Data, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")] [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor | DynamicallyAccessedMemberTypes.NonPublicConstructors)] // needed by Clone() to preserve derived ctors public class DataSet : MarshalByValueComponent, IListSource, IXmlSerializable, ISupportInitializeNotification, ISerializable { internal const string RequiresUnreferencedCodeMessage = "Members from serialized types may be trimmed if not referenced directly."; private const string KEY_XMLSCHEMA = "XmlSchema"; private const string KEY_XMLDIFFGRAM = "XmlDiffGram"; private DataViewManager? _defaultViewManager; // Public Collections private readonly DataTableCollection _tableCollection; private readonly DataRelationCollection _relationCollection; internal PropertyCollection? _extendedProperties; private string _dataSetName = "NewDataSet"; private string _datasetPrefix = string.Empty; internal string _namespaceURI = string.Empty; private bool _enforceConstraints = true; // globalization stuff private bool _caseSensitive; private CultureInfo _culture; private bool _cultureUserSet; // Internal definitions internal bool _fInReadXml; internal bool _fInLoadDiffgram; internal bool _fTopLevelTable; internal bool _fInitInProgress; internal bool _fEnableCascading = true; internal bool _fIsSchemaLoading; private bool _fBoundToDocument; // for XmlDataDocument internal string _mainTableName = string.Empty; //default remoting format is XML private SerializationFormat _remotingFormat = SerializationFormat.Xml; private readonly object _defaultViewManagerLock = new object(); private static int s_objectTypeCount; // Bid counter private readonly int _objectID = Interlocked.Increment(ref s_objectTypeCount); private static XmlSchemaComplexType? s_schemaTypeForWSDL; internal bool _useDataSetSchemaOnly; // UseDataSetSchemaOnly , for YUKON internal bool _udtIsWrapped; // if UDT is wrapped , for YUKON /// <summary> /// Initializes a new instance of the <see cref='System.Data.DataSet'/> class. 
/// </summary> public DataSet() { GC.SuppressFinalize(this); DataCommonEventSource.Log.Trace("<ds.DataSet.DataSet|API> {0}", ObjectID); // others will call this constr // Set default locale _tableCollection = new DataTableCollection(this); _relationCollection = new DataRelationCollection.DataSetRelationCollection(this); _culture = CultureInfo.CurrentCulture; // Set default locale } /// <summary> /// Initializes a new instance of a <see cref='System.Data.DataSet'/> /// class with the given name. /// </summary> public DataSet(string dataSetName) : this() { DataSetName = dataSetName; } [DefaultValue(SerializationFormat.Xml)] public SerializationFormat RemotingFormat { get { return _remotingFormat; } set { switch (value) { case SerializationFormat.Xml: break; case SerializationFormat.Binary: if (LocalAppContextSwitches.AllowUnsafeSerializationFormatBinary) { break; } throw ExceptionBuilder.SerializationFormatBinaryNotSupported(); default: throw ExceptionBuilder.InvalidRemotingFormat(value); } _remotingFormat = value; // this property is inherited to DataTable from DataSet.So we set this value to DataTable also for (int i = 0; i < Tables.Count; i++) { Tables[i].RemotingFormat = value; } } } [Browsable(false)] [DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)] public virtual SchemaSerializationMode SchemaSerializationMode { //Typed DataSet calls into this get { return SchemaSerializationMode.IncludeSchema; } set { if (value != SchemaSerializationMode.IncludeSchema) { throw ExceptionBuilder.CannotChangeSchemaSerializationMode(); } } } // Check whether the stream is binary serialized. // 'static' function that consumes SerializationInfo protected bool IsBinarySerialized(SerializationInfo info, StreamingContext context) { // mainly for typed DS // our default remoting format is XML SerializationFormat remotingFormat = SerializationFormat.Xml; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { if (e.Name == "DataSet.RemotingFormat") { //DataSet.RemotingFormat does not exist in V1/V1.1 versions remotingFormat = (SerializationFormat)e.Value!; break; } } return (remotingFormat == SerializationFormat.Binary); } // Should Schema be included during Serialization // 'static' function that consumes SerializationInfo protected SchemaSerializationMode DetermineSchemaSerializationMode(SerializationInfo info, StreamingContext context) { //Typed DataSet calls into this SchemaSerializationMode schemaSerializationMode = SchemaSerializationMode.IncludeSchema; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { if (e.Name == "SchemaSerializationMode.DataSet") { //SchemaSerializationMode.DataSet does not exist in V1/V1.1 versions schemaSerializationMode = (SchemaSerializationMode)e.Value!; break; } } return schemaSerializationMode; } protected SchemaSerializationMode DetermineSchemaSerializationMode(XmlReader reader) { //Typed DataSet calls into this SchemaSerializationMode schemaSerializationMode = SchemaSerializationMode.IncludeSchema; reader.MoveToContent(); if (reader.NodeType == XmlNodeType.Element) { if (reader.HasAttributes) { string? 
attribValue = reader.GetAttribute(Keywords.MSD_SCHEMASERIALIZATIONMODE, Keywords.MSDNS); if (string.Equals(attribValue, Keywords.MSD_EXCLUDESCHEMA, StringComparison.OrdinalIgnoreCase)) { schemaSerializationMode = SchemaSerializationMode.ExcludeSchema; } else if (string.Equals(attribValue, Keywords.MSD_INCLUDESCHEMA, StringComparison.OrdinalIgnoreCase)) { schemaSerializationMode = SchemaSerializationMode.IncludeSchema; } else if (attribValue != null) { // if attrib does not exist, then don't throw throw ExceptionBuilder.InvalidSchemaSerializationMode(typeof(SchemaSerializationMode), attribValue); } } } return schemaSerializationMode; } // Deserialize all the tables data of the dataset from binary/xml stream. // 'instance' method that consumes SerializationInfo [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] protected void GetSerializationData(SerializationInfo info, StreamingContext context) { // mainly for typed DS SerializationFormat remotingFormat = SerializationFormat.Xml; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { if (e.Name == "DataSet.RemotingFormat") { //DataSet.RemotingFormat does not exist in V1/V1.1 versions remotingFormat = (SerializationFormat)e.Value!; break; } } DeserializeDataSetData(info, context, remotingFormat); } // Deserialize all the tables schema and data of the dataset from binary/xml stream. [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", Justification = "CreateInstanceOfThisType's use of GetType uses only the parameterless constructor, but the annotations preserve all non-public constructors causing a warning for the serialization constructors. Those constructors won't be used here.")] protected DataSet(SerializationInfo info, StreamingContext context) : this(info, context, true) { } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2112:ReflectionToRequiresUnreferencedCode", Justification = "CreateInstanceOfThisType's use of GetType uses only the parameterless constructor, but the annotations preserve all non-public constructors causing a warning for the serialization constructors. 
Those constructors won't be used here.")] protected DataSet(SerializationInfo info, StreamingContext context, bool ConstructSchema) : this() { SerializationFormat remotingFormat = SerializationFormat.Xml; SchemaSerializationMode schemaSerializationMode = SchemaSerializationMode.IncludeSchema; SerializationInfoEnumerator e = info.GetEnumerator(); while (e.MoveNext()) { switch (e.Name) { case "DataSet.RemotingFormat": //DataSet.RemotingFormat does not exist in V1/V1.1 versions remotingFormat = (SerializationFormat)e.Value!; break; case "SchemaSerializationMode.DataSet": //SchemaSerializationMode.DataSet does not exist in V1/V1.1 versions schemaSerializationMode = (SchemaSerializationMode)e.Value!; break; } } if (remotingFormat == SerializationFormat.Binary && !LocalAppContextSwitches.AllowUnsafeSerializationFormatBinary) { throw ExceptionBuilder.SerializationFormatBinaryNotSupported(); } if (schemaSerializationMode == SchemaSerializationMode.ExcludeSchema) { InitializeDerivedDataSet(); } // adding back this check will fix typed dataset XML remoting, but we have to fix case that // a class inherits from DataSet and just relies on DataSet to deserialize (see SQL BU DT 374717) // to fix that case also, we need to add a flag and add it to below check so return (no-op) will be // conditional (flag needs to be set in TypedDataSet if (remotingFormat == SerializationFormat.Xml && !ConstructSchema) { return; //For typed dataset xml remoting, this is a no-op } DeserializeDataSet(info, context, remotingFormat, schemaSerializationMode); } [UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Justification = "Binary serialization is unsafe in general and is planned to be obsoleted. We do not want to mark interface or ctors of this class as unsafe as that would show many unnecessary warnings elsewhere.")] public virtual void GetObjectData(SerializationInfo info, StreamingContext context) { SerializationFormat remotingFormat = RemotingFormat; SerializeDataSet(info, context, remotingFormat); } // Deserialize all the tables data of the dataset from binary/xml stream. protected virtual void InitializeDerivedDataSet() { } // Serialize all the tables. 
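// Annotation: RemotingFormat selects between the two branches below.
// SerializationFormat.Xml (the default) round-trips the DataSet as an inline
// XSD schema string plus a diffgram string, while SerializationFormat.Binary
// (opt-in via LocalAppContextSwitches.AllowUnsafeSerializationFormatBinary)
// serializes each DataTable with BinaryFormatter.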
[RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void SerializeDataSet(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat) { Debug.Assert(info != null); info.AddValue("DataSet.RemotingVersion", new Version(2, 0)); // SqlHotFix 299, SerializationFormat enumeration types don't exist in V1.1 SP1 if (SerializationFormat.Xml != remotingFormat) { info.AddValue("DataSet.RemotingFormat", remotingFormat); } // SqlHotFix 299, SchemaSerializationMode enumeration types don't exist in V1.1 SP1 if (SchemaSerializationMode.IncludeSchema != SchemaSerializationMode) { //SkipSchemaDuringSerialization info.AddValue("SchemaSerializationMode.DataSet", SchemaSerializationMode); } if (remotingFormat != SerializationFormat.Xml) { if (SchemaSerializationMode == SchemaSerializationMode.IncludeSchema) { //DataSet public state properties SerializeDataSetProperties(info, context); //Tables Count info.AddValue("DataSet.Tables.Count", Tables.Count); //Tables, Columns, Rows for (int i = 0; i < Tables.Count; i++) { BinaryFormatter bf = new BinaryFormatter(null, new StreamingContext(context.State, false)); MemoryStream memStream = new MemoryStream(); #pragma warning disable SYSLIB0011 // Issue https://github.com/dotnet/runtime/issues/39289 tracks finding an alternative to BinaryFormatter bf.Serialize(memStream, Tables[i]); #pragma warning restore SYSLIB0011 memStream.Position = 0; info.AddValue(string.Format(CultureInfo.InvariantCulture, "DataSet.Tables_{0}", i), memStream.GetBuffer()); } //Constraints for (int i = 0; i < Tables.Count; i++) { Tables[i].SerializeConstraints(info, context, i, true); } //Relations SerializeRelations(info, context); //Expression Columns for (int i = 0; i < Tables.Count; i++) { Tables[i].SerializeExpressionColumns(info, context, i); } } else { //Serialize DataSet public properties. SerializeDataSetProperties(info, context); } //Rows for (int i = 0; i < Tables.Count; i++) { Tables[i].SerializeTableData(info, context, i); } } else { // old behaviour string strSchema = GetXmlSchemaForRemoting(null); info.AddValue(KEY_XMLSCHEMA, strSchema); StringBuilder strBuilder = new StringBuilder(EstimatedXmlStringSize() * 2); StringWriter strWriter = new StringWriter(strBuilder, CultureInfo.InvariantCulture); XmlTextWriter w = new XmlTextWriter(strWriter); WriteXml(w, XmlWriteMode.DiffGram); info.AddValue(KEY_XMLDIFFGRAM, strWriter.ToString()); } } // Deserialize all the tables - marked internal so that DataTable can call into this [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void DeserializeDataSet(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat, SchemaSerializationMode schemaSerializationMode) { // deserialize schema DeserializeDataSetSchema(info, context, remotingFormat, schemaSerializationMode); // deserialize data DeserializeDataSetData(info, context, remotingFormat); } // Deserialize schema. 
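// Annotation: the binary branch below mirrors SerializeDataSet. It reads
// "DataSet.Tables.Count", then one "DataSet.Tables_{i}" byte[] per table, then
// constraints, relations and expression columns, in the same order they were
// written. The XML branch only restores the schema here; the diffgram data is
// applied afterwards by DeserializeDataSetData.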
[RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void DeserializeDataSetSchema(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat, SchemaSerializationMode schemaSerializationMode) { if (remotingFormat != SerializationFormat.Xml) { if (schemaSerializationMode == SchemaSerializationMode.IncludeSchema) { //DataSet public state properties DeserializeDataSetProperties(info, context); //Tables Count int tableCount = info.GetInt32("DataSet.Tables.Count"); //Tables, Columns, Rows for (int i = 0; i < tableCount; i++) { byte[] buffer = (byte[])info.GetValue(string.Format(CultureInfo.InvariantCulture, "DataSet.Tables_{0}", i), typeof(byte[]))!; MemoryStream memStream = new MemoryStream(buffer); memStream.Position = 0; BinaryFormatter bf = new BinaryFormatter(null, new StreamingContext(context.State, false)); #pragma warning disable SYSLIB0011 // Issue https://github.com/dotnet/runtime/issues/39289 tracks finding an alternative to BinaryFormatter DataTable dt = (DataTable)bf.Deserialize(memStream); #pragma warning restore SYSLIB0011 Tables.Add(dt); } //Constraints for (int i = 0; i < tableCount; i++) { Tables[i].DeserializeConstraints(info, context, /* table index */i, /* serialize all constraints */ true); // } //Relations DeserializeRelations(info, context); //Expression Columns for (int i = 0; i < tableCount; i++) { Tables[i].DeserializeExpressionColumns(info, context, i); } } else { //DeSerialize DataSet public properties.[Locale, CaseSensitive and EnforceConstraints] DeserializeDataSetProperties(info, context); } } else { string? strSchema = (string?)info.GetValue(KEY_XMLSCHEMA, typeof(string)); if (strSchema != null) { ReadXmlSchema(new XmlTextReader(new StringReader(strSchema)), true); } } } // Deserialize all data. [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void DeserializeDataSetData(SerializationInfo info, StreamingContext context, SerializationFormat remotingFormat) { if (remotingFormat != SerializationFormat.Xml) { for (int i = 0; i < Tables.Count; i++) { Tables[i].DeserializeTableData(info, context, i); } } else { string? 
strData = (string?)info.GetValue(KEY_XMLDIFFGRAM, typeof(string)); if (strData != null) { ReadXml(new XmlTextReader(new StringReader(strData)), XmlReadMode.DiffGram); } } } // Serialize just the dataset properties private void SerializeDataSetProperties(SerializationInfo info, StreamingContext context) { //DataSet basic properties info.AddValue("DataSet.DataSetName", DataSetName); info.AddValue("DataSet.Namespace", Namespace); info.AddValue("DataSet.Prefix", Prefix); //DataSet runtime properties info.AddValue("DataSet.CaseSensitive", CaseSensitive); info.AddValue("DataSet.LocaleLCID", Locale.LCID); info.AddValue("DataSet.EnforceConstraints", EnforceConstraints); //ExtendedProperties info.AddValue("DataSet.ExtendedProperties", ExtendedProperties); } // DeSerialize dataset properties private void DeserializeDataSetProperties(SerializationInfo info, StreamingContext context) { //DataSet basic properties _dataSetName = info.GetString("DataSet.DataSetName")!; _namespaceURI = info.GetString("DataSet.Namespace")!; _datasetPrefix = info.GetString("DataSet.Prefix")!; //DataSet runtime properties _caseSensitive = info.GetBoolean("DataSet.CaseSensitive"); int lcid = (int)info.GetValue("DataSet.LocaleLCID", typeof(int))!; _culture = new CultureInfo(lcid); _cultureUserSet = true; _enforceConstraints = info.GetBoolean("DataSet.EnforceConstraints"); //ExtendedProperties _extendedProperties = (PropertyCollection?)info.GetValue("DataSet.ExtendedProperties", typeof(PropertyCollection)); } // Gets relation info from the dataset. // ***Schema for Serializing ArrayList of Relations*** // Relations -> [relationName]->[parentTableIndex, parentcolumnIndexes]->[childTableIndex, childColumnIndexes]->[Nested]->[extendedProperties] private void SerializeRelations(SerializationInfo info, StreamingContext context) { ArrayList relationList = new ArrayList(); foreach (DataRelation rel in Relations) { int[] parentInfo = new int[rel.ParentColumns.Length + 1]; parentInfo[0] = Tables.IndexOf(rel.ParentTable); for (int j = 1; j < parentInfo.Length; j++) { parentInfo[j] = rel.ParentColumns[j - 1].Ordinal; } int[] childInfo = new int[rel.ChildColumns.Length + 1]; childInfo[0] = Tables.IndexOf(rel.ChildTable); for (int j = 1; j < childInfo.Length; j++) { childInfo[j] = rel.ChildColumns[j - 1].Ordinal; } ArrayList list = new ArrayList(); list.Add(rel.RelationName); list.Add(parentInfo); list.Add(childInfo); list.Add(rel.Nested); list.Add(rel._extendedProperties); relationList.Add(list); } info.AddValue("DataSet.Relations", relationList); } // Adds relations to the dataset. // ***Schema for Serializing ArrayList of Relations*** // Relations -> [relationName]->[parentTableIndex, parentcolumnIndexes]->[childTableIndex, childColumnIndexes]->[Nested]->[extendedProperties] private void DeserializeRelations(SerializationInfo info, StreamingContext context) { ArrayList relationList = (ArrayList)info.GetValue("DataSet.Relations", typeof(ArrayList))!; foreach (ArrayList list in relationList) { string relationName = (string)list[0]!; int[] parentInfo = (int[])list[1]!; int[] childInfo = (int[])list[2]!; bool isNested = (bool)list[3]!; PropertyCollection? extendedProperties = (PropertyCollection?)list[4]!; //ParentKey Columns. DataColumn[] parentkeyColumns = new DataColumn[parentInfo.Length - 1]; for (int i = 0; i < parentkeyColumns.Length; i++) { parentkeyColumns[i] = Tables[parentInfo[0]].Columns[parentInfo[i + 1]]; } //ChildKey Columns. 
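// Worked example of the encoding documented above DeserializeRelations: a
// relation whose parent key is column 2 of table 0 and whose child key is
// column 5 of table 1 is stored as parentInfo = { 0, 2 } and
// childInfo = { 1, 5 }; element 0 is the table index and the remaining
// elements are column ordinals.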
DataColumn[] childkeyColumns = new DataColumn[childInfo.Length - 1]; for (int i = 0; i < childkeyColumns.Length; i++) { childkeyColumns[i] = Tables[childInfo[0]].Columns[childInfo[i + 1]]; } //Create the Relation, without any constraints[Assumption: The constraints are added earlier than the relations] DataRelation rel = new DataRelation(relationName, parentkeyColumns, childkeyColumns, false); rel.CheckMultipleNested = false; // disable the check for multiple nested parent rel.Nested = isNested; rel._extendedProperties = extendedProperties; Relations.Add(rel); rel.CheckMultipleNested = true; // enable the check for multiple nested parent } } internal void FailedEnableConstraints() { EnforceConstraints = false; throw ExceptionBuilder.EnforceConstraint(); } /// <summary> /// Gets or sets a value indicating whether string /// comparisons within <see cref='System.Data.DataTable'/> /// objects are case-sensitive. /// </summary> [DefaultValue(false)] public bool CaseSensitive { get { return _caseSensitive; } set { if (_caseSensitive != value) { bool oldValue = _caseSensitive; _caseSensitive = value; if (!ValidateCaseConstraint()) { _caseSensitive = oldValue; throw ExceptionBuilder.CannotChangeCaseLocale(); } foreach (DataTable table in Tables) { table.SetCaseSensitiveValue(value, false, true); } } } } bool IListSource.ContainsListCollection => true; /// <summary> /// Gets a custom view of the data contained by the <see cref='System.Data.DataSet'/> , one /// that allows filtering, searching, and navigating through the custom data view. /// </summary> [Browsable(false)] public DataViewManager DefaultViewManager { get { if (_defaultViewManager == null) { lock (_defaultViewManagerLock) { if (_defaultViewManager == null) { _defaultViewManager = new DataViewManager(this, true); } } } return _defaultViewManager; } } /// <summary> /// Gets or sets a value indicating whether constraint rules are followed when /// attempting any update operation. /// </summary> [DefaultValue(true)] public bool EnforceConstraints { get { return _enforceConstraints; } set { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.set_EnforceConstraints|API> {0}, {1}", ObjectID, value); try { if (_enforceConstraints != value) { if (value) { EnableConstraints(); } _enforceConstraints = value; } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } } internal void RestoreEnforceConstraints(bool value) { _enforceConstraints = value; } internal void EnableConstraints() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.EnableConstraints|INFO> {0}", ObjectID); try { bool errors = false; for (ConstraintEnumerator constraints = new ConstraintEnumerator(this); constraints.GetNext();) { Constraint constraint = constraints.GetConstraint(); errors |= constraint.IsConstraintViolated(); } foreach (DataTable table in Tables) { foreach (DataColumn column in table.Columns) { if (!column.AllowDBNull) { errors |= column.IsNotAllowDBNullViolated(); } if (column.MaxLength >= 0) { errors |= column.IsMaxLengthViolated(); } } } if (errors) { FailedEnableConstraints(); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Gets or sets the name of this <see cref='System.Data.DataSet'/> . 
/// </summary> [DefaultValue("")] public string DataSetName { get { return _dataSetName; } set { DataCommonEventSource.Log.Trace("<ds.DataSet.set_DataSetName|API> {0}, '{1}'", ObjectID, value); if (value != _dataSetName) { if (value == null || value.Length == 0) { throw ExceptionBuilder.SetDataSetNameToEmpty(); } DataTable? conflicting = Tables[value, Namespace]; if ((conflicting != null) && (!conflicting._fNestedInDataset)) { throw ExceptionBuilder.SetDataSetNameConflicting(value); } RaisePropertyChanging(nameof(DataSetName)); _dataSetName = value; } } } [DefaultValue("")] [AllowNull] public string Namespace { get { return _namespaceURI; } set { DataCommonEventSource.Log.Trace("<ds.DataSet.set_Namespace|API> {0}, '{1}'", ObjectID, value); if (value == null) { value = string.Empty; } if (value != _namespaceURI) { RaisePropertyChanging(nameof(Namespace)); foreach (DataTable dt in Tables) { if (dt._tableNamespace != null) { continue; } if ((dt.NestedParentRelations.Length == 0) || (dt.NestedParentRelations.Length == 1 && dt.NestedParentRelations[0].ChildTable == dt)) { if (Tables.Contains(dt.TableName, value, false, true)) { throw ExceptionBuilder.DuplicateTableName2(dt.TableName, value); } dt.CheckCascadingNamespaceConflict(value); dt.DoRaiseNamespaceChange(); } } _namespaceURI = value; if (string.IsNullOrEmpty(value)) { _datasetPrefix = string.Empty; } } } } [DefaultValue("")] [AllowNull] public string Prefix { get { return _datasetPrefix; } set { if (value == null) { value = string.Empty; } if ((XmlConvert.DecodeName(value) == value) && (XmlConvert.EncodeName(value) != value)) { throw ExceptionBuilder.InvalidPrefix(value); } if (value != _datasetPrefix) { RaisePropertyChanging(nameof(Prefix)); _datasetPrefix = value; } } } /// <summary> /// Gets the collection of custom user information. /// </summary> [Browsable(false)] public PropertyCollection ExtendedProperties => _extendedProperties ?? (_extendedProperties = new PropertyCollection()); /// <summary> /// Gets a value indicating whether there are errors in any /// of the rows in any of the tables of this <see cref='System.Data.DataSet'/> . /// </summary> [Browsable(false)] public bool HasErrors { get { for (int i = 0; i < Tables.Count; i++) { if (Tables[i].HasErrors) { return true; } } return false; } } [Browsable(false)] public bool IsInitialized => !_fInitInProgress; /// <summary> /// Gets or sets the locale information used to compare strings within the table. 
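        /// For example (illustrative): <c>ds.Locale = new CultureInfo("de-DE")</c>
        /// changes how strings compare and sort in every table that has not explicitly
        /// set its own <see cref='System.Data.DataTable.Locale'/>.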
        /// </summary>
        public CultureInfo Locale
        {
            get
            {
                // used for comparing, not formatting/parsing
                Debug.Assert(null != _culture, "DataSet.Locale: null culture");
                return _culture;
            }
            set
            {
                long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.set_Locale|API> {0}", ObjectID);
                try
                {
                    if (value != null)
                    {
                        if (!_culture.Equals(value))
                        {
                            SetLocaleValue(value, true);
                        }
                        _cultureUserSet = true;
                    }
                }
                finally
                {
                    DataCommonEventSource.Log.ExitScope(logScopeId);
                }
            }
        }

        internal void SetLocaleValue(CultureInfo value, bool userSet)
        {
            bool flag = false;
            bool exceptionThrown = false;
            int tableCount = 0;

            CultureInfo oldLocale = _culture;
            bool oldUserSet = _cultureUserSet;

            try
            {
                _culture = value;
                _cultureUserSet = userSet;

                foreach (DataTable table in Tables)
                {
                    if (!table.ShouldSerializeLocale())
                    {
                        table.SetLocaleValue(value, false, false);
                    }
                }

                flag = ValidateLocaleConstraint();
                if (flag)
                {
                    flag = false;
                    foreach (DataTable table in Tables)
                    {
                        tableCount++;
                        if (!table.ShouldSerializeLocale())
                        {
                            table.SetLocaleValue(value, false, true);
                        }
                    }
                    flag = true;
                }
            }
            catch
            {
                exceptionThrown = true;
                throw;
            }
            finally
            {
                if (!flag)
                {
                    // reset old locale if validation failed or an exception was thrown
                    _culture = oldLocale;
                    _cultureUserSet = oldUserSet;
                    foreach (DataTable table in Tables)
                    {
                        if (!table.ShouldSerializeLocale())
                        {
                            table.SetLocaleValue(oldLocale, false, false);
                        }
                    }
                    try
                    {
                        for (int i = 0; i < tableCount; ++i)
                        {
                            if (!Tables[i].ShouldSerializeLocale())
                            {
                                Tables[i].SetLocaleValue(oldLocale, false, true);
                            }
                        }
                    }
                    catch (Exception e) when (ADP.IsCatchableExceptionType(e))
                    {
                        ADP.TraceExceptionWithoutRethrow(e);
                    }
                    if (!exceptionThrown)
                    {
                        throw ExceptionBuilder.CannotChangeCaseLocale(null);
                    }
                }
            }
        }

        internal bool ShouldSerializeLocale()
        {
            // This method is used in design-time scenarios via reflection:
            //   by the property grid, to show the Locale property in bold or not;
            //   by the code dom, for persisting the Locale property or not.
            // We always want the locale persisted if set by the user or different from the
            // current thread, but that logic should be performed by the serialization code.
            return _cultureUserSet;
        }

        [Browsable(false)]
        [DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)]
        public override ISite? Site
        {
            get { return base.Site; }
            set
            {
                ISite? oldSite = Site;
                if (value == null && oldSite != null)
                {
                    IContainer? cont = oldSite.Container;
                    if (cont != null)
                    {
                        for (int i = 0; i < Tables.Count; i++)
                        {
                            if (Tables[i].Site != null)
                            {
                                cont.Remove(Tables[i]);
                            }
                        }
                    }
                }
                base.Site = value;
            }
        }

        /// <summary>
        /// Gets the collection of relations that link tables and
        /// allow navigation from parent tables to child tables.
        /// </summary>
        [DesignerSerializationVisibility(DesignerSerializationVisibility.Content)]
        public DataRelationCollection Relations => _relationCollection;

        /// <summary>
        /// Indicates whether <see cref='Relations'/> property should be persisted.
        /// </summary>
        protected virtual bool ShouldSerializeRelations() => true;

        /// <summary>
        /// Resets the <see cref='System.Data.DataSet.Relations'/> property to its default state.
        /// </summary>
        private void ResetRelations() => Relations.Clear();

        /// <summary>
        /// Gets the collection of tables contained in the <see cref='System.Data.DataSet'/>.
        /// </summary>
        [DesignerSerializationVisibility(DesignerSerializationVisibility.Content)]
        public DataTableCollection Tables => _tableCollection;

        /// <summary>
        /// Indicates whether <see cref='System.Data.DataSet.Tables'/> property should be persisted.
/// </summary> protected virtual bool ShouldSerializeTables() => true; /// <summary> /// Resets the <see cref='System.Data.DataSet.Tables'/> property to its default state. /// </summary> private void ResetTables() => Tables.Clear(); internal bool FBoundToDocument { get { return _fBoundToDocument; } set { _fBoundToDocument = value; } } /// <summary> /// Commits all the changes made to this <see cref='System.Data.DataSet'/> since it was loaded or the last /// time <see cref='System.Data.DataSet.AcceptChanges'/> was called. /// </summary> public void AcceptChanges() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.AcceptChanges|API> {0}", ObjectID); try { for (int i = 0; i < Tables.Count; i++) { Tables[i].AcceptChanges(); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal event PropertyChangedEventHandler? PropertyChanging; /// <summary> /// Occurs when attempting to merge schemas for two tables with the same name. /// </summary> public event MergeFailedEventHandler? MergeFailed; internal event DataRowCreatedEventHandler? DataRowCreated; // Internal for XmlDataDocument only internal event DataSetClearEventhandler? ClearFunctionCalled; // Internal for XmlDataDocument only public event EventHandler? Initialized; public void BeginInit() { _fInitInProgress = true; } public void EndInit() { Tables.FinishInitCollection(); for (int i = 0; i < Tables.Count; i++) { Tables[i].Columns.FinishInitCollection(); } for (int i = 0; i < Tables.Count; i++) { Tables[i].Constraints.FinishInitConstraints(); } ((DataRelationCollection.DataSetRelationCollection)Relations).FinishInitRelations(); _fInitInProgress = false; OnInitialized(); } /// <summary> /// Clears the <see cref='System.Data.DataSet'/> of any data by removing all rows in all tables. /// </summary> public void Clear() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Clear|API> {0}", ObjectID); try { OnClearFunctionCalled(null); bool fEnforce = EnforceConstraints; EnforceConstraints = false; for (int i = 0; i < Tables.Count; i++) { Tables[i].Clear(); } EnforceConstraints = fEnforce; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } private DataSet CreateInstanceOfThisType() { return (DataSet)Activator.CreateInstance(GetType(), true)!; } /// <summary> /// Clones the structure of the <see cref='System.Data.DataSet'/>, including all <see cref='System.Data.DataTable'/> schemas, relations, and /// constraints. /// </summary> // Prevent inlining so that reflection calls are not moved to caller that may be in a different assembly that may have a different grant set. [MethodImpl(MethodImplOptions.NoInlining)] public virtual DataSet Clone() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Clone|API> {0}", ObjectID); try { DataSet ds = CreateInstanceOfThisType(); if (ds.Tables.Count > 0) // To clean up all the schema in strong typed dataset. { ds.Reset(); } //copy some original dataset properties ds.DataSetName = DataSetName; ds.CaseSensitive = CaseSensitive; ds._culture = _culture; ds._cultureUserSet = _cultureUserSet; ds.EnforceConstraints = EnforceConstraints; ds.Namespace = Namespace; ds.Prefix = Prefix; ds.RemotingFormat = RemotingFormat; ds._fIsSchemaLoading = true; //delay expression evaluation // ...Tables... DataTableCollection tbls = Tables; for (int i = 0; i < tbls.Count; i++) { DataTable dt = tbls[i].Clone(ds); dt._tableNamespace = tbls[i].Namespace; // hardcode the namespace for a second to not mess up // DataRelation cloning. 
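                // Aside (illustrative usage sketch; 'source' is a hypothetical populated
                // DataSet, not a variable in this method):
                //
                //   DataSet schemaOnly = source.Clone(); // tables, relations, constraints; no rows
                //   DataSet deepCopy   = source.Copy();  // Clone() plus every row of data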
ds.Tables.Add(dt); } // ...Constraints... for (int i = 0; i < tbls.Count; i++) { ConstraintCollection constraints = tbls[i].Constraints; for (int j = 0; j < constraints.Count; j++) { if (constraints[j] is UniqueConstraint) { continue; } ForeignKeyConstraint foreign = (ForeignKeyConstraint)constraints[j]; if (foreign.Table == foreign.RelatedTable) { continue; // we have already added this foreign key in while cloning the datatable } ds.Tables[i].Constraints.Add(constraints[j].Clone(ds)!); } } // ...Relations... DataRelationCollection rels = Relations; for (int i = 0; i < rels.Count; i++) { DataRelation rel = rels[i].Clone(ds); rel.CheckMultipleNested = false; // disable the check for multiple nested parent ds.Relations.Add(rel); rel.CheckMultipleNested = true; // enable the check for multiple nested parent } // ...Extended Properties... if (_extendedProperties != null) { foreach (object key in _extendedProperties.Keys) { ds.ExtendedProperties[key] = _extendedProperties[key]; } } foreach (DataTable table in Tables) { foreach (DataColumn col in table.Columns) { if (col.Expression.Length != 0) { ds.Tables[table.TableName, table.Namespace]!.Columns[col.ColumnName]!.CopyExpressionFrom(col); } } } for (int i = 0; i < tbls.Count; i++) { ds.Tables[i]._tableNamespace = tbls[i]._tableNamespace; // undo the hardcoding of the namespace } ds._fIsSchemaLoading = false; //reactivate column computations return ds; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Copies both the structure and data for this <see cref='System.Data.DataSet'/>. /// </summary> public DataSet Copy() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Copy|API> {0}", ObjectID); try { DataSet dsNew = Clone(); bool fEnforceConstraints = dsNew.EnforceConstraints; dsNew.EnforceConstraints = false; foreach (DataTable table in Tables) { DataTable destTable = dsNew.Tables[table.TableName, table.Namespace]!; foreach (DataRow row in table.Rows) { table.CopyRow(destTable, row); } } dsNew.EnforceConstraints = fEnforceConstraints; return dsNew; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal int EstimatedXmlStringSize() { int bytes = 100; for (int i = 0; i < Tables.Count; i++) { int rowBytes = (Tables[i].TableName.Length + 4) << 2; DataTable table = Tables[i]; for (int j = 0; j < table.Columns.Count; j++) { rowBytes += ((table.Columns[j].ColumnName.Length + 4) << 2); rowBytes += 20; } bytes += table.Rows.Count * rowBytes; } return bytes; } /// <summary> /// Returns a copy of the <see cref='System.Data.DataSet'/> that contains all changes made to /// it since it was loaded or <see cref='System.Data.DataSet.AcceptChanges'/> was last called. /// </summary> public DataSet? GetChanges() => GetChanges(DataRowState.Added | DataRowState.Deleted | DataRowState.Modified); private struct TableChanges { private readonly BitArray _rowChanges; internal TableChanges(int rowCount) { _rowChanges = new BitArray(rowCount); HasChanges = 0; } internal int HasChanges { get; set; } internal bool this[int index] { get { return _rowChanges[index]; } set { Debug.Assert(value && !_rowChanges[index], "setting twice or to false"); _rowChanges[index] = value; HasChanges++; } } } public DataSet? GetChanges(DataRowState rowStates) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetChanges|API> {0}, rowStates={1}", ObjectID, rowStates); try { DataSet? 
dsNew = null; bool fEnforceConstraints = false; if (0 != (rowStates & ~(DataRowState.Added | DataRowState.Deleted | DataRowState.Modified | DataRowState.Unchanged))) { throw ExceptionBuilder.InvalidRowState(rowStates); } // Initialize all the individual table bitmaps. TableChanges[] bitMatrix = new TableChanges[Tables.Count]; for (int i = 0; i < bitMatrix.Length; ++i) { bitMatrix[i] = new TableChanges(Tables[i].Rows.Count); } // find all the modified rows and their parents MarkModifiedRows(bitMatrix, rowStates); // copy the changes to a cloned table for (int i = 0; i < bitMatrix.Length; ++i) { Debug.Assert(0 <= bitMatrix[i].HasChanges, "negative change count"); if (0 < bitMatrix[i].HasChanges) { if (null == dsNew) { dsNew = Clone(); fEnforceConstraints = dsNew.EnforceConstraints; dsNew.EnforceConstraints = false; } DataTable table = Tables[i]; DataTable destTable = dsNew.Tables[table.TableName, table.Namespace]!; Debug.Assert(bitMatrix[i].HasChanges <= table.Rows.Count, "to many changes"); for (int j = 0; 0 < bitMatrix[i].HasChanges; ++j) { // Loop through the rows. if (bitMatrix[i][j]) { table.CopyRow(destTable, table.Rows[j]); bitMatrix[i].HasChanges--; } } } } if (null != dsNew) { dsNew.EnforceConstraints = fEnforceConstraints; } return dsNew; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } private void MarkModifiedRows(TableChanges[] bitMatrix, DataRowState rowStates) { // for every table, every row & every relation find the modified rows and for non-deleted rows, their parents for (int tableIndex = 0; tableIndex < bitMatrix.Length; ++tableIndex) { DataRowCollection rows = Tables[tableIndex].Rows; int rowCount = rows.Count; for (int rowIndex = 0; rowIndex < rowCount; ++rowIndex) { DataRow row = rows[rowIndex]; DataRowState rowState = row.RowState; Debug.Assert(DataRowState.Added == rowState || DataRowState.Deleted == rowState || DataRowState.Modified == rowState || DataRowState.Unchanged == rowState, "unexpected DataRowState"); // if bit not already set and row is modified if ((0 != (rowStates & rowState)) && !bitMatrix[tableIndex][rowIndex]) { bitMatrix[tableIndex][rowIndex] = true; if (DataRowState.Deleted != rowState) { MarkRelatedRowsAsModified(bitMatrix, row); } } } } } private void MarkRelatedRowsAsModified(TableChanges[] bitMatrix, DataRow row) { DataRelationCollection relations = row.Table.ParentRelations; int relationCount = relations.Count; for (int relatedIndex = 0; relatedIndex < relationCount; ++relatedIndex) { DataRow[] relatedRows = row.GetParentRows(relations[relatedIndex], DataRowVersion.Current); foreach (DataRow relatedRow in relatedRows) { int relatedTableIndex = Tables.IndexOf(relatedRow.Table); int relatedRowIndex = relatedRow.Table.Rows.IndexOf(relatedRow); if (!bitMatrix[relatedTableIndex][relatedRowIndex]) { bitMatrix[relatedTableIndex][relatedRowIndex] = true; if (DataRowState.Deleted != relatedRow.RowState) { // recurse into related rows MarkRelatedRowsAsModified(bitMatrix, relatedRow); } } } } } IList IListSource.GetList() => DefaultViewManager; [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal string GetRemotingDiffGram(DataTable table) { StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter); writer.Formatting = Formatting.Indented; // Create and save the updates new NewDiffgramGen(table, false).Save(writer, table); return strWriter.ToString(); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public string GetXml() { long logScopeId = 
DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetXml|API> {0}", ObjectID); try { // StringBuilder strBuilder = new StringBuilder(EstimatedXmlStringSize()); // StringWriter strWriter = new StringWriter(strBuilder); StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter w = new XmlTextWriter(strWriter); w.Formatting = Formatting.Indented; new XmlDataTreeWriter(this).Save(w, false); return strWriter.ToString(); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public string GetXmlSchema() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetXmlSchema|API> {0}", ObjectID); try { StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter); writer.Formatting = Formatting.Indented; (new XmlTreeGen(SchemaFormat.Public)).Save(this, writer); return strWriter.ToString(); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal string GetXmlSchemaForRemoting(DataTable? table) { StringWriter strWriter = new StringWriter(CultureInfo.InvariantCulture); XmlTextWriter writer = new XmlTextWriter(strWriter); writer.Formatting = Formatting.Indented; if (table == null) { if (SchemaSerializationMode == SchemaSerializationMode.ExcludeSchema) { (new XmlTreeGen(SchemaFormat.RemotingSkipSchema)).Save(this, writer); } else { (new XmlTreeGen(SchemaFormat.Remoting)).Save(this, writer); } } else { // no skip schema support for typed datatable (new XmlTreeGen(SchemaFormat.Remoting)).Save(table, writer); } return strWriter.ToString(); } /// <summary> /// Gets a value indicating whether the <see cref='System.Data.DataSet'/> has changes, including new, /// deleted, or modified rows. /// </summary> public bool HasChanges() => HasChanges(DataRowState.Added | DataRowState.Deleted | DataRowState.Modified); /// <summary> /// Gets a value indicating whether the <see cref='System.Data.DataSet'/> has changes, including new, /// deleted, or modified rows, filtered by <see cref='System.Data.DataRowState'/>. /// </summary> public bool HasChanges(DataRowState rowStates) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.HasChanges|API> {0}, rowStates={1}", ObjectID, (int)rowStates); try { const DataRowState allRowStates = DataRowState.Detached | DataRowState.Unchanged | DataRowState.Added | DataRowState.Deleted | DataRowState.Modified; if ((rowStates & (~allRowStates)) != 0) { throw ExceptionBuilder.ArgumentOutOfRange("rowState"); } for (int i = 0; i < Tables.Count; i++) { DataTable table = Tables[i]; for (int j = 0; j < table.Rows.Count; j++) { DataRow row = table.Rows[j]; if ((row.RowState & rowStates) != 0) { return true; } } } return false; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Infer the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(XmlReader? reader, string[]? 
nsArray) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.InferXmlSchema|API> {0}", ObjectID); try { if (reader == null) { return; } XmlDocument xdoc = new XmlDocument(); if (reader.NodeType == XmlNodeType.Element) { XmlNode node = xdoc.ReadNode(reader)!; xdoc.AppendChild(node); } else { xdoc.Load(reader); } if (xdoc.DocumentElement == null) { return; } InferSchema(xdoc, nsArray, XmlReadMode.InferSchema); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Infer the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(Stream? stream, string[]? nsArray) { if (stream == null) { return; } InferXmlSchema(new XmlTextReader(stream), nsArray); } /// <summary> /// Infer the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(TextReader? reader, string[]? nsArray) { if (reader == null) { return; } InferXmlSchema(new XmlTextReader(reader), nsArray); } /// <summary> /// Infer the XML schema from the specified file into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void InferXmlSchema(string fileName, string[]? nsArray) { XmlTextReader xr = new XmlTextReader(fileName); try { InferXmlSchema(xr, nsArray); } finally { xr.Close(); } } /// <summary> /// Reads the XML schema from the specified <see cref="System.Xml.XmlReader" /> into the <see cref="System.Data.DataSet" /> /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(XmlReader? reader) => ReadXmlSchema(reader, false); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void ReadXmlSchema(XmlReader? reader, bool denyResolving) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXmlSchema|INFO> {0}, reader, denyResolving={1}", ObjectID, denyResolving); try { int iCurrentDepth = -1; if (reader == null) { return; } if (reader is XmlTextReader) { ((XmlTextReader)reader).WhitespaceHandling = WhitespaceHandling.None; } XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema if (reader.NodeType == XmlNodeType.Element) { iCurrentDepth = reader.Depth; } reader.MoveToContent(); if (reader.NodeType == XmlNodeType.Element) { // if reader points to the schema load it... if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } // ... 
otherwise backup the top node and all its attributes XmlElement topNode = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); if (reader.HasAttributes) { int attrCount = reader.AttributeCount; for (int i = 0; i < attrCount; i++) { reader.MoveToAttribute(i); if (reader.NamespaceURI.Equals(Keywords.XSD_XMLNS_NS)) { topNode.SetAttribute(reader.Name, reader.GetAttribute(i)); } else { XmlAttribute attr = topNode.SetAttributeNode(reader.LocalName, reader.NamespaceURI); attr.Prefix = reader.Prefix; attr.Value = reader.GetAttribute(i); } } } reader.Read(); while (MoveToElement(reader, iCurrentDepth)) { // if reader points to the schema load it... if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); return; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } XmlNode node = xdoc.ReadNode(reader)!; topNode.AppendChild(node); } // read the closing tag of the current element ReadEndElement(reader); // if we are here no schema has been found xdoc.AppendChild(topNode); // so we InferSchema InferSchema(xdoc, null, XmlReadMode.Auto); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal bool MoveToElement(XmlReader reader, int depth) { while (!reader.EOF && reader.NodeType != XmlNodeType.EndElement && reader.NodeType != XmlNodeType.Element && reader.Depth > depth) { reader.Read(); } return (reader.NodeType == XmlNodeType.Element); } private static void MoveToElement(XmlReader reader) { while (!reader.EOF && reader.NodeType != XmlNodeType.EndElement && reader.NodeType != XmlNodeType.Element) { reader.Read(); } } internal void ReadEndElement(XmlReader reader) { while (reader.NodeType == XmlNodeType.Whitespace) { reader.Skip(); } if (reader.NodeType == XmlNodeType.None) { reader.Skip(); } else if (reader.NodeType == XmlNodeType.EndElement) { reader.ReadEndElement(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void ReadXSDSchema(XmlReader reader, bool denyResolving) { XmlSchemaSet sSet = new XmlSchemaSet(); int schemaFragmentCount = 1; //read from current schmema element if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { if (reader.HasAttributes) { string? 
attribValue = reader.GetAttribute(Keywords.MSD_FRAGMENTCOUNT, Keywords.MSDNS); // this must not move the position if (!string.IsNullOrEmpty(attribValue)) { schemaFragmentCount = int.Parse(attribValue, null); } } } while (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { XmlSchema s = XmlSchema.Read(reader, null)!; sSet.Add(s); //read the end tag ReadEndElement(reader); if (--schemaFragmentCount > 0) { MoveToElement(reader); } while (reader.NodeType == XmlNodeType.Whitespace) { reader.Skip(); } } sSet.Compile(); XSDSchema schema = new XSDSchema(); schema.LoadSchema(sSet, this); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void ReadXDRSchema(XmlReader reader) { XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema XmlNode schNode = xdoc.ReadNode(reader)!; xdoc.AppendChild(schNode); XDRSchema schema = new XDRSchema(this, false); DataSetName = xdoc.DocumentElement!.LocalName; schema.LoadSchema((XmlElement)schNode, this); } /// <summary> /// Reads the XML schema from the specified <see cref='System.IO.Stream'/> into the /// <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(Stream? stream) { if (stream == null) { return; } ReadXmlSchema(new XmlTextReader(stream), false); } /// <summary> /// Reads the XML schema from the specified <see cref='System.IO.TextReader'/> into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(TextReader? reader) { if (reader == null) { return; } ReadXmlSchema(new XmlTextReader(reader), false); } /// <summary> /// Reads the XML schema from the specified file into the <see cref='System.Data.DataSet'/>. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void ReadXmlSchema(string fileName) { XmlTextReader xr = new XmlTextReader(fileName); try { ReadXmlSchema(xr, false); } finally { xr.Close(); } } #region WriteXmlSchema /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to using the specified <see cref='Stream'/> object.</summary> /// <param name="stream">A <see cref='Stream'/> object used to write to a file.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(Stream? stream) => WriteXmlSchema(stream, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to using the specified <see cref='Stream'/> object.</summary> /// <param name="stream">A <see cref='Stream'/> object used to write to a file.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(Stream? 
stream, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(stream, SchemaFormat.Public, multipleTargetConverter); } /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a file.</summary> /// <param name="fileName">The file name (including the path) to which to write.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(string fileName) => WriteXmlSchema(fileName, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a file.</summary> /// <param name="fileName">The file name (including the path) to which to write.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(string fileName, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(fileName, SchemaFormat.Public, multipleTargetConverter); } /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a <see cref='TextWriter'/> object.</summary> /// <param name="writer">The <see cref='TextWriter'/> object with which to write.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(TextWriter? writer) => WriteXmlSchema(writer, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to a <see cref='TextWriter'/> object.</summary> /// <param name="writer">The <see cref='TextWriter'/> object with which to write.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(TextWriter? writer, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(writer, SchemaFormat.Public, multipleTargetConverter); } /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to an <see cref='XmlWriter'/> object.</summary> /// <param name="writer">The <see cref='XmlWriter'/> object with which to write.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(XmlWriter? writer) => WriteXmlSchema(writer, SchemaFormat.Public, null); /// <summary>Writes the <see cref='DataSet'/> structure as an XML schema to an <see cref='XmlWriter'/> object.</summary> /// <param name="writer">The <see cref='XmlWriter'/> object with which to write.</param> /// <param name="multipleTargetConverter">A delegate used to convert <see cref='Type'/> into string.</param> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXmlSchema(XmlWriter? writer, Converter<Type, string> multipleTargetConverter) { ADP.CheckArgumentNull(multipleTargetConverter, nameof(multipleTargetConverter)); WriteXmlSchema(writer, SchemaFormat.Public, multipleTargetConverter); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(string fileName, SchemaFormat schemaFormat, Converter<Type, string>? 
multipleTargetConverter) { XmlTextWriter xw = new XmlTextWriter(fileName, null); try { xw.Formatting = Formatting.Indented; xw.WriteStartDocument(true); WriteXmlSchema(xw, schemaFormat, multipleTargetConverter); xw.WriteEndDocument(); } finally { xw.Close(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(Stream? stream, SchemaFormat schemaFormat, Converter<Type, string>? multipleTargetConverter) { if (stream == null) { return; } XmlTextWriter w = new XmlTextWriter(stream, null); w.Formatting = Formatting.Indented; WriteXmlSchema(w, schemaFormat, multipleTargetConverter); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(TextWriter? writer, SchemaFormat schemaFormat, Converter<Type, string>? multipleTargetConverter) { if (writer == null) { return; } XmlTextWriter w = new XmlTextWriter(writer); w.Formatting = Formatting.Indented; WriteXmlSchema(w, schemaFormat, multipleTargetConverter); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void WriteXmlSchema(XmlWriter? writer, SchemaFormat schemaFormat, Converter<Type, string>? multipleTargetConverter) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.WriteXmlSchema|INFO> {0}, schemaFormat={1}", ObjectID, schemaFormat); try { // Generate SchemaTree and write it out if (writer != null) { XmlTreeGen treeGen; if (schemaFormat == SchemaFormat.WebService && SchemaSerializationMode == SchemaSerializationMode.ExcludeSchema && writer.WriteState == WriteState.Element) { treeGen = new XmlTreeGen(SchemaFormat.WebServiceSkipSchema); } else { treeGen = new XmlTreeGen(schemaFormat); } treeGen.Save(this, null, writer, false, multipleTargetConverter); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } #endregion [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(XmlReader? reader) => ReadXml(reader, false); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal XmlReadMode ReadXml(XmlReader? reader, bool denyResolving) { IDisposable? restrictedScope = null; long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXml|INFO> {0}, denyResolving={1}", ObjectID, denyResolving); try { restrictedScope = TypeLimiter.EnterRestrictedScope(this); DataTable.DSRowDiffIdUsageSection rowDiffIdUsage = default; try { bool fDataFound = false; bool fSchemaFound = false; bool fDiffsFound = false; bool fIsXdr = false; int iCurrentDepth = -1; XmlReadMode ret = XmlReadMode.Auto; bool isEmptyDataSet = false; bool topNodeIsProcessed = false; // we chanche topnode and there is just one case that we miss to process it // it is : <elem attrib1="Attrib">txt</elem> // clear the hashtable to avoid conflicts between diffgrams, SqlHotFix 782 rowDiffIdUsage.Prepare(this); if (reader == null) { return ret; } if (Tables.Count == 0) { isEmptyDataSet = true; } if (reader is XmlTextReader) { ((XmlTextReader)reader).WhitespaceHandling = WhitespaceHandling.Significant; } XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema XmlDataLoader? 
xmlload = null; reader.MoveToContent(); if (reader.NodeType == XmlNodeType.Element) { iCurrentDepth = reader.Depth; } if (reader.NodeType == XmlNodeType.Element) { if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { ReadXmlDiffgram(reader); // read the closing tag of the current element ReadEndElement(reader); return XmlReadMode.DiffGram; } // if reader points to the schema load it if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); return XmlReadMode.ReadSchema; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); return XmlReadMode.ReadSchema; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } // now either the top level node is a table and we load it through dataReader... // ... or backup the top node and all its attributes because we may need to InferSchema XmlElement topNode = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); if (reader.HasAttributes) { int attrCount = reader.AttributeCount; for (int i = 0; i < attrCount; i++) { reader.MoveToAttribute(i); if (reader.NamespaceURI.Equals(Keywords.XSD_XMLNS_NS)) topNode.SetAttribute(reader.Name, reader.GetAttribute(i)); else { XmlAttribute attr = topNode.SetAttributeNode(reader.LocalName, reader.NamespaceURI); attr.Prefix = reader.Prefix; attr.Value = reader.GetAttribute(i); } } } reader.Read(); string rootNodeSimpleContent = reader.Value; while (MoveToElement(reader, iCurrentDepth)) { if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { ReadXmlDiffgram(reader); // read the closing tag of the current element // YUKON FIX ReadEndElement(reader); // return XmlReadMode.DiffGram; ret = XmlReadMode.DiffGram; // continue reading for multiple schemas } // if reader points to the schema load it... if (!fSchemaFound && !fDataFound && reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit ReadXDRSchema(reader); fSchemaFound = true; fIsXdr = true; continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit ReadXSDSchema(reader, denyResolving); fSchemaFound = true; continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { ReadXmlDiffgram(reader); fDiffsFound = true; ret = XmlReadMode.DiffGram; } else { // We have found data IFF the reader.NodeType == Element and reader.depth == currentDepth-1 // if reader.NodeType == whitespace, skip all whitespace. // skip processing i.e. continue if the first non-whitespace node is not of type element. 
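                                // For example (hypothetical input): given "<Root>  <Customer/></Root>",
                                // the whitespace after <Root> is consumed by the loop below and
                                // <Customer/> is then loaded as data; given "<Root>some text</Root>",
                                // no element follows, so the content is skipped via the 'continue'.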
                                while (!reader.EOF && reader.NodeType == XmlNodeType.Whitespace)
                                    reader.Read();
                                if (reader.NodeType != XmlNodeType.Element)
                                    continue;
                                // we found data here
                                fDataFound = true;
                                if (!fSchemaFound && Tables.Count == 0)
                                {
                                    XmlNode node = xdoc.ReadNode(reader)!;
                                    topNode.AppendChild(node);
                                }
                                else
                                {
                                    if (xmlload == null)
                                    {
                                        xmlload = new XmlDataLoader(this, fIsXdr, topNode, false);
                                    }
                                    xmlload.LoadData(reader);
                                    topNodeIsProcessed = true; // we process the topnode
                                    if (fSchemaFound)
                                    {
                                        ret = XmlReadMode.ReadSchema;
                                    }
                                    else
                                    {
                                        ret = XmlReadMode.IgnoreSchema;
                                    }
                                }
                            }
                        }

                        // read the closing tag of the current element
                        ReadEndElement(reader);

                        bool isfTopLevelTableSet = false;
                        bool tmpValue = _fTopLevelTable;
                        // While inferring, we ignore the root element's text content
                        if (!fSchemaFound && Tables.Count == 0 && !topNode.HasChildNodes)
                        {
                            // We should not add the simple content of the root element to topNode if we are not inferring
                            _fTopLevelTable = true;
                            isfTopLevelTableSet = true;
                            if ((rootNodeSimpleContent != null && rootNodeSimpleContent.Length > 0))
                            {
                                topNode.InnerText = rootNodeSimpleContent;
                            }
                        }
                        if (!isEmptyDataSet)
                        {
                            if ((rootNodeSimpleContent != null && rootNodeSimpleContent.Length > 0))
                            {
                                topNode.InnerText = rootNodeSimpleContent;
                            }
                        }

                        // now top node contains the data part
                        xdoc.AppendChild(topNode);
                        if (xmlload == null)
                        {
                            xmlload = new XmlDataLoader(this, fIsXdr, topNode, false);
                        }

                        if (!isEmptyDataSet && !topNodeIsProcessed)
                        {
                            XmlElement root = xdoc.DocumentElement!;
                            Debug.Assert(root.NamespaceURI != null, "root.NamespaceURI should not be null, it should be empty string");
                            // just recognize that the Xml below represents a datatable at the top level
                            // <table attr1="foo" attr2="bar" table_Text="junk">text</table>
                            // only allow root element with simple content, if any
                            if (root.ChildNodes.Count == 0 || ((root.ChildNodes.Count == 1) && root.FirstChild!.GetType() == typeof(System.Xml.XmlText)))
                            {
                                bool initfTopLevelTable = _fTopLevelTable;
                                // if root element maps to a datatable
                                // ds and dt can't have the same name and namespace at the same time, how to write to xml
                                if (DataSetName != root.Name && _namespaceURI != root.NamespaceURI &&
                                    Tables.Contains(root.Name, (root.NamespaceURI.Length == 0) ? null : root.NamespaceURI, false, true))
                                {
                                    _fTopLevelTable = true;
                                }
                                try
                                {
                                    xmlload.LoadData(xdoc);
                                }
                                finally
                                {
                                    _fTopLevelTable = initfTopLevelTable;
                                    // this is not for inference, we have schema and we were skipping
                                    // topnode where it was a datatable, we must restore the value
                                }
                            }
                        }

                        // above check and below check are orthogonal
                        // so we InferSchema
                        if (!fDiffsFound)
                        {
                            // Load Data
                            if (!fSchemaFound && Tables.Count == 0)
                            {
                                InferSchema(xdoc, null, XmlReadMode.Auto);
                                ret = XmlReadMode.InferSchema;
                                xmlload.FromInference = true;
                                try
                                {
                                    xmlload.LoadData(xdoc);
                                }
                                finally
                                {
                                    xmlload.FromInference = false;
                                }
                            }
                            // We don't need this assignment. Once we set it (where we set it during inference), it won't be changed
                            if (isfTopLevelTableSet)
                                _fTopLevelTable = tmpValue;
                        }
                    }

                    return ret;
                }
                finally
                {
                    rowDiffIdUsage.Cleanup();
                }
            }
            finally
            {
                restrictedScope?.Dispose();
                DataCommonEventSource.Log.ExitScope(logScopeId);
            }
        }

        [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)]
        public XmlReadMode ReadXml(Stream? stream)
        {
            if (stream == null)
            {
                return XmlReadMode.Auto;
            }

            XmlTextReader xr = new XmlTextReader(stream);

            // Prevent Dtd entity in dataset
            xr.XmlResolver = null;

            return ReadXml(xr, false);
        }

        [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)]
        public XmlReadMode ReadXml(TextReader?
reader) { if (reader == null) { return XmlReadMode.Auto; } XmlTextReader xr = new XmlTextReader(reader); // Prevent Dtd entity in dataset xr.XmlResolver = null; return ReadXml(xr, false); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(string fileName) { XmlTextReader xr = new XmlTextReader(fileName); // Prevent Dtd entity in dataset xr.XmlResolver = null; try { return ReadXml(xr, false); } finally { xr.Close(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal void InferSchema(XmlDocument xdoc, string[]? excludedNamespaces, XmlReadMode mode) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.InferSchema|INFO> {0}, mode={1}", ObjectID, mode); try { if (null == excludedNamespaces) { excludedNamespaces = Array.Empty<string>(); } XmlNodeReader xnr = new XmlIgnoreNamespaceReader(xdoc, excludedNamespaces); XmlSchemaInference infer = new XmlSchemaInference(); infer.Occurrence = XmlSchemaInference.InferenceOption.Relaxed; infer.TypeInference = (mode == XmlReadMode.InferTypedSchema) ? XmlSchemaInference.InferenceOption.Restricted : XmlSchemaInference.InferenceOption.Relaxed; XmlSchemaSet schemaSet = infer.InferSchema(xnr); schemaSet.Compile(); XSDSchema schema = new XSDSchema(); schema.FromInference = true; try { schema.LoadSchema(schemaSet, this); } finally { schema.FromInference = false; // this is always false if you are not calling fron inference } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } private bool IsEmpty() { foreach (DataTable table in Tables) { if (table.Rows.Count > 0) { return false; } } return true; } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] private void ReadXmlDiffgram(XmlReader reader) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXmlDiffgram|INFO> {0}", ObjectID); try { int d = reader.Depth; bool fEnforce = EnforceConstraints; EnforceConstraints = false; DataSet newDs; bool isEmpty = IsEmpty(); if (isEmpty) { newDs = this; } else { newDs = Clone(); newDs.EnforceConstraints = false; } foreach (DataTable t in newDs.Tables) { t.Rows._nullInList = 0; } reader.MoveToContent(); if ((reader.LocalName != Keywords.DIFFGRAM) && (reader.NamespaceURI != Keywords.DFFNS)) { return; } reader.Read(); if (reader.NodeType == XmlNodeType.Whitespace) { MoveToElement(reader, reader.Depth - 1 /*iCurrentDepth*/); // skip over whitespace. } newDs._fInLoadDiffgram = true; if (reader.Depth > d) { if ((reader.NamespaceURI != Keywords.DFFNS) && (reader.NamespaceURI != Keywords.MSDNS)) { //we should be inside the dataset part XmlDocument xdoc = new XmlDocument(); XmlElement node = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); reader.Read(); if (reader.NodeType == XmlNodeType.Whitespace) { MoveToElement(reader, reader.Depth - 1 /*iCurrentDepth*/); // skip over whitespace. } if (reader.Depth - 1 > d) { XmlDataLoader xmlload = new XmlDataLoader(newDs, false, node, false); xmlload._isDiffgram = true; // turn on the special processing xmlload.LoadData(reader); } ReadEndElement(reader); if (reader.NodeType == XmlNodeType.Whitespace) { MoveToElement(reader, reader.Depth - 1 /*iCurrentDepth*/); // skip over whitespace. 
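                // At this point the nested DataSet instance part has been consumed. For
                // orientation, a diffgram document has this overall shape (abbreviated and
                // illustrative; "MyDataSet" is a hypothetical dataset name):
                //
                //   <diffgr:diffgram xmlns:diffgr="urn:schemas-microsoft-com:xml-diffgram-v1">
                //     <MyDataSet> ...current row data... </MyDataSet>
                //     <diffgr:before> ...original versions of changed rows... </diffgr:before>
                //     <diffgr:errors> ...per-row error information... </diffgr:errors>
                //   </diffgr:diffgram>
                //
                // The before/errors sections are handled by the XMLDiffLoader below.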
} } Debug.Assert(reader.NodeType != XmlNodeType.Whitespace, "Should not be on Whitespace node"); if (((reader.LocalName == Keywords.SQL_BEFORE) && (reader.NamespaceURI == Keywords.DFFNS)) || ((reader.LocalName == Keywords.MSD_ERRORS) && (reader.NamespaceURI == Keywords.DFFNS))) { //this will consume the changes and the errors part XMLDiffLoader diffLoader = new XMLDiffLoader(); diffLoader.LoadDiffGram(newDs, reader); } // get to the closing diff tag while (reader.Depth > d) { reader.Read(); } // read the closing tag ReadEndElement(reader); } foreach (DataTable t in newDs.Tables) { if (t.Rows._nullInList > 0) { throw ExceptionBuilder.RowInsertMissing(t.TableName); } } newDs._fInLoadDiffgram = false; //terrible performance! foreach (DataTable t in newDs.Tables) { DataRelation[] nestedParentRelations = t.NestedParentRelations; foreach (DataRelation rel in nestedParentRelations) { if (rel.ParentTable == t) { foreach (DataRow r in t.Rows) { foreach (DataRelation rel2 in nestedParentRelations) { r.CheckForLoops(rel2); } } } } } if (!isEmpty) { Merge(newDs); if (_dataSetName == "NewDataSet") { _dataSetName = newDs._dataSetName; } newDs.EnforceConstraints = fEnforce; } EnforceConstraints = fEnforce; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(XmlReader? reader, XmlReadMode mode) => ReadXml(reader, mode, false); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] internal XmlReadMode ReadXml(XmlReader? reader, XmlReadMode mode, bool denyResolving) { IDisposable? restictedScope = null; long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ReadXml|INFO> {0}, mode={1}, denyResolving={2}", ObjectID, mode, denyResolving); try { restictedScope = TypeLimiter.EnterRestrictedScope(this); XmlReadMode ret = mode; if (reader == null) { return ret; } if (mode == XmlReadMode.Auto) { // nested ReadXml calls on the same DataSet must be done outside of RowDiffIdUsage scope return ReadXml(reader); } DataTable.DSRowDiffIdUsageSection rowDiffIdUsage = default; try { bool fSchemaFound = false; bool fDataFound = false; bool fIsXdr = false; int iCurrentDepth = -1; // prepare and cleanup rowDiffId hashtable rowDiffIdUsage.Prepare(this); if (reader is XmlTextReader) { ((XmlTextReader)reader).WhitespaceHandling = WhitespaceHandling.Significant; } XmlDocument xdoc = new XmlDocument(); // we may need this to infer the schema if ((mode != XmlReadMode.Fragment) && (reader.NodeType == XmlNodeType.Element)) { iCurrentDepth = reader.Depth; } reader.MoveToContent(); XmlDataLoader? xmlload = null; if (reader.NodeType == XmlNodeType.Element) { XmlElement? 
topNode = null; if (mode == XmlReadMode.Fragment) { xdoc.AppendChild(xdoc.CreateElement("ds_sqlXmlWraPPeR")); topNode = xdoc.DocumentElement!; } else { //handle the top node if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { if ((mode == XmlReadMode.DiffGram) || (mode == XmlReadMode.IgnoreSchema)) { ReadXmlDiffgram(reader); // read the closing tag of the current element ReadEndElement(reader); } else { reader.Skip(); } return ret; } if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema and exit if ((mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXDRSchema(reader); } else { reader.Skip(); } return ret; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit if ((mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXSDSchema(reader, denyResolving); } else { reader.Skip(); } return ret; //since the top level element is a schema return } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) { throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); } // now either the top level node is a table and we load it through dataReader... // ... or backup the top node and all its attributes topNode = xdoc.CreateElement(reader.Prefix, reader.LocalName, reader.NamespaceURI); if (reader.HasAttributes) { int attrCount = reader.AttributeCount; for (int i = 0; i < attrCount; i++) { reader.MoveToAttribute(i); if (reader.NamespaceURI.Equals(Keywords.XSD_XMLNS_NS)) topNode.SetAttribute(reader.Name, reader.GetAttribute(i)); else { XmlAttribute attr = topNode.SetAttributeNode(reader.LocalName, reader.NamespaceURI); attr.Prefix = reader.Prefix; attr.Value = reader.GetAttribute(i); } } } reader.Read(); } while (MoveToElement(reader, iCurrentDepth)) { if (reader.LocalName == Keywords.XDR_SCHEMA && reader.NamespaceURI == Keywords.XDRNS) { // load XDR schema if (!fSchemaFound && !fDataFound && (mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXDRSchema(reader); fSchemaFound = true; fIsXdr = true; } else { reader.Skip(); } continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI == Keywords.XSDNS) { // load XSD schema and exit if ((mode != XmlReadMode.IgnoreSchema) && (mode != XmlReadMode.InferSchema) && (mode != XmlReadMode.InferTypedSchema)) { ReadXSDSchema(reader, denyResolving); fSchemaFound = true; } else { reader.Skip(); } continue; } if ((reader.LocalName == Keywords.DIFFGRAM) && (reader.NamespaceURI == Keywords.DFFNS)) { if ((mode == XmlReadMode.DiffGram) || (mode == XmlReadMode.IgnoreSchema)) { ReadXmlDiffgram(reader); ret = XmlReadMode.DiffGram; } else { reader.Skip(); } continue; } if (reader.LocalName == Keywords.XSD_SCHEMA && reader.NamespaceURI.StartsWith(Keywords.XSD_NS_START, StringComparison.Ordinal)) throw ExceptionBuilder.DataSetUnsupportedSchema(Keywords.XSDNS); if (mode == XmlReadMode.DiffGram) { reader.Skip(); continue; // we do not read data in diffgram mode } // if we are here we found some data fDataFound = true; if (mode == XmlReadMode.InferSchema || mode == XmlReadMode.InferTypedSchema) { //save the node in DOM until the end; XmlNode node = xdoc.ReadNode(reader)!; 
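                            // Buffering for inference (illustrative caller-side sketch; the file
                            // name is hypothetical): in the InferSchema/InferTypedSchema modes every
                            // data element is parked in the DOM and only loaded after the schema has
                            // been inferred further below, e.g.:
                            //
                            //   var ds = new DataSet();
                            //   ds.ReadXml("orders.xml", XmlReadMode.InferSchema); // infer tables, then fill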
topNode.AppendChild(node); } else { if (xmlload == null) { xmlload = new XmlDataLoader(this, fIsXdr, topNode, mode == XmlReadMode.IgnoreSchema); } xmlload.LoadData(reader); } } //end of the while // read the closing tag of the current element ReadEndElement(reader); // now top node contains the data part xdoc.AppendChild(topNode); if (xmlload == null) xmlload = new XmlDataLoader(this, fIsXdr, mode == XmlReadMode.IgnoreSchema); if (mode == XmlReadMode.DiffGram) { // we already got the diffs through XmlReader interface return ret; } // Load Data if (mode == XmlReadMode.InferSchema || mode == XmlReadMode.InferTypedSchema) { InferSchema(xdoc, null, mode); ret = XmlReadMode.InferSchema; xmlload.FromInference = true; try { xmlload.LoadData(xdoc); } finally { xmlload.FromInference = false; } } } return ret; } finally { // prepare and cleanup rowDiffId hashtable rowDiffIdUsage.Cleanup(); } } finally { restictedScope?.Dispose(); DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(Stream? stream, XmlReadMode mode) { if (stream == null) { return XmlReadMode.Auto; } XmlTextReader reader = (mode == XmlReadMode.Fragment) ? new XmlTextReader(stream, XmlNodeType.Element, null) : new XmlTextReader(stream); // Prevent Dtd entity in dataset reader.XmlResolver = null; return ReadXml(reader, mode, false); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(TextReader? reader, XmlReadMode mode) { if (reader == null) { return XmlReadMode.Auto; } XmlTextReader xmlreader = (mode == XmlReadMode.Fragment) ? new XmlTextReader(reader.ReadToEnd(), XmlNodeType.Element, null) : new XmlTextReader(reader); // Prevent Dtd entity in dataset xmlreader.XmlResolver = null; return ReadXml(xmlreader, mode, false); } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public XmlReadMode ReadXml(string fileName, XmlReadMode mode) { XmlTextReader xr; if (mode == XmlReadMode.Fragment) { FileStream stream = new FileStream(fileName, FileMode.Open); xr = new XmlTextReader(stream, XmlNodeType.Element, null); } else { xr = new XmlTextReader(fileName); } // Prevent Dtd entity in dataset xr.XmlResolver = null; try { return ReadXml(xr, mode, false); } finally { xr.Close(); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(Stream? stream) => WriteXml(stream, XmlWriteMode.IgnoreSchema); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(TextWriter? writer) => WriteXml(writer, XmlWriteMode.IgnoreSchema); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(XmlWriter? writer) => WriteXml(writer, XmlWriteMode.IgnoreSchema); [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(string fileName) => WriteXml(fileName, XmlWriteMode.IgnoreSchema); /// <summary> /// Writes schema and data for the DataSet. /// </summary> [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(Stream? stream, XmlWriteMode mode) { if (stream != null) { XmlTextWriter w = new XmlTextWriter(stream, null); w.Formatting = Formatting.Indented; WriteXml(w, mode); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(TextWriter? writer, XmlWriteMode mode) { if (writer != null) { XmlTextWriter w = new XmlTextWriter(writer); w.Formatting = Formatting.Indented; WriteXml(w, mode); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(XmlWriter? 
writer, XmlWriteMode mode) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.WriteXml|API> {0}, mode={1}", ObjectID, mode); try { // Generate SchemaTree and write it out if (writer != null) { if (mode == XmlWriteMode.DiffGram) { // Create and save the updates new NewDiffgramGen(this).Save(writer); } else { // Create and save xml data new XmlDataTreeWriter(this).Save(writer, mode == XmlWriteMode.WriteSchema); } } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] public void WriteXml(string fileName, XmlWriteMode mode) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.WriteXml|API> {0}, fileName='{1}', mode={2}", ObjectID, fileName, (int)mode); XmlTextWriter xw = new XmlTextWriter(fileName, null); try { xw.Formatting = Formatting.Indented; xw.WriteStartDocument(true); // Create and save the updates if (mode == XmlWriteMode.DiffGram) { new NewDiffgramGen(this).Save(xw); } else { // Create and save xml data new XmlDataTreeWriter(this).Save(xw, mode == XmlWriteMode.WriteSchema); } xw.WriteEndDocument(); } finally { xw.Close(); DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Gets the collection of parent relations which belong to a /// specified table. /// </summary> internal DataRelationCollection GetParentRelations(DataTable table) => table.ParentRelations; /// <summary> /// Merges this <see cref='System.Data.DataSet'/> into a specified <see cref='System.Data.DataSet'/>. /// </summary> public void Merge(DataSet dataSet) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, dataSet={1}", ObjectID, (dataSet != null) ? dataSet.ObjectID : 0); Debug.Assert(dataSet != null); try { Merge(dataSet, false, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataSet'/> into a specified <see cref='System.Data.DataSet'/> preserving changes according to /// the specified argument. /// </summary> public void Merge(DataSet dataSet, bool preserveChanges) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, dataSet={1}, preserveChanges={2}", ObjectID, (dataSet != null) ? dataSet.ObjectID : 0, preserveChanges); Debug.Assert(dataSet != null); try { Merge(dataSet, preserveChanges, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataSet'/> into a specified <see cref='System.Data.DataSet'/> preserving changes according to /// the specified argument, and handling an incompatible schema according to the /// specified argument. /// </summary> public void Merge(DataSet dataSet, bool preserveChanges, MissingSchemaAction missingSchemaAction) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, dataSet={1}, preserveChanges={2}, missingSchemaAction={3}", ObjectID, (dataSet != null) ? 
dataSet.ObjectID : 0, preserveChanges, missingSchemaAction); try { // Argument checks if (dataSet == null) { throw ExceptionBuilder.ArgumentNull(nameof(dataSet)); } switch (missingSchemaAction) { case MissingSchemaAction.Add: case MissingSchemaAction.Ignore: case MissingSchemaAction.Error: case MissingSchemaAction.AddWithKey: Merger merger = new Merger(this, preserveChanges, missingSchemaAction); merger.MergeDataSet(dataSet); break; default: throw ADP.InvalidMissingSchemaAction(missingSchemaAction); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataTable'/> into a specified <see cref='System.Data.DataTable'/>. /// </summary> public void Merge(DataTable table) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, table={1}", ObjectID, (table != null) ? table.ObjectID : 0); Debug.Assert(table != null); try { Merge(table, false, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Merges this <see cref='System.Data.DataTable'/> into a specified <see cref='System.Data.DataTable'/>. with a value to preserve changes /// made to the target, and a value to deal with missing schemas. /// </summary> public void Merge(DataTable table, bool preserveChanges, MissingSchemaAction missingSchemaAction) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, table={1}, preserveChanges={2}, missingSchemaAction={3}", ObjectID, (table != null) ? table.ObjectID : 0, preserveChanges, missingSchemaAction); try { // Argument checks if (table == null) { throw ExceptionBuilder.ArgumentNull(nameof(table)); } switch (missingSchemaAction) { case MissingSchemaAction.Add: case MissingSchemaAction.Ignore: case MissingSchemaAction.Error: case MissingSchemaAction.AddWithKey: Merger merger = new Merger(this, preserveChanges, missingSchemaAction); merger.MergeTable(table); break; default: throw ADP.InvalidMissingSchemaAction(missingSchemaAction); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } public void Merge(DataRow[] rows) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, rows", ObjectID); try { Merge(rows, false, MissingSchemaAction.Add); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } public void Merge(DataRow[] rows, bool preserveChanges, MissingSchemaAction missingSchemaAction) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Merge|API> {0}, preserveChanges={1}, missingSchemaAction={2}", ObjectID, preserveChanges, missingSchemaAction); try { // Argument checks if (rows == null) { throw ExceptionBuilder.ArgumentNull(nameof(rows)); } switch (missingSchemaAction) { case MissingSchemaAction.Add: case MissingSchemaAction.Ignore: case MissingSchemaAction.Error: case MissingSchemaAction.AddWithKey: Merger merger = new Merger(this, preserveChanges, missingSchemaAction); merger.MergeRows(rows); break; default: throw ADP.InvalidMissingSchemaAction(missingSchemaAction); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } protected virtual void OnPropertyChanging(PropertyChangedEventArgs pcevent) { PropertyChanging?.Invoke(this, pcevent); } /// <summary> /// Inheriting classes should override this method to handle this event. /// Call base.OnMergeFailed to send this event to any registered event /// listeners. 
/// </summary> internal void OnMergeFailed(MergeFailedEventArgs mfevent) { if (MergeFailed != null) { MergeFailed(this, mfevent); } else { throw ExceptionBuilder.MergeFailed(mfevent.Conflict); } } internal void RaiseMergeFailed(DataTable? table, string conflict, MissingSchemaAction missingSchemaAction) { if (MissingSchemaAction.Error == missingSchemaAction) { throw ExceptionBuilder.MergeFailed(conflict); } OnMergeFailed(new MergeFailedEventArgs(table, conflict)); } internal void OnDataRowCreated(DataRow row) => DataRowCreated?.Invoke(this, row); internal void OnClearFunctionCalled(DataTable? table) => ClearFunctionCalled?.Invoke(this, table); private void OnInitialized() => Initialized?.Invoke(this, EventArgs.Empty); /// <summary> /// This method should be overridden by subclasses to restrict tables being removed. /// </summary> protected internal virtual void OnRemoveTable(DataTable table) { } internal void OnRemovedTable(DataTable table) { DataViewManager? viewManager = _defaultViewManager; if (null != viewManager) { viewManager.DataViewSettings.Remove(table); } } /// <summary> /// This method should be overridden by subclasses to restrict tables being removed. /// </summary> protected virtual void OnRemoveRelation(DataRelation relation) { } internal void OnRemoveRelationHack(DataRelation relation) => OnRemoveRelation(relation); protected internal void RaisePropertyChanging(string name) => OnPropertyChanging(new PropertyChangedEventArgs(name)); internal DataTable[] TopLevelTables() => TopLevelTables(false); internal DataTable[] TopLevelTables(bool forSchema) { // first let's figure out if we can represent the given dataSet as a tree using // the fact that all connected undirected graphs with n-1 edges are trees. List<DataTable> topTables = new List<DataTable>(); if (forSchema) { // prepend the tables that are nested more than once for (int i = 0; i < Tables.Count; i++) { DataTable table = Tables[i]; if (table.NestedParentsCount > 1 || table.SelfNested) { topTables.Add(table); } } } for (int i = 0; i < Tables.Count; i++) { DataTable table = Tables[i]; if (table.NestedParentsCount == 0 && !topTables.Contains(table)) { topTables.Add(table); } } return topTables.Count == 0 ? Array.Empty<DataTable>() : topTables.ToArray(); } /// <summary> /// This method rolls back all the changes to have been made to this DataSet since /// it was loaded or the last time AcceptChanges was called. /// Any rows still in edit-mode cancel their edits. New rows get removed. Modified and /// Deleted rows return back to their original state. /// </summary> public virtual void RejectChanges() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.RejectChanges|API> {0}", ObjectID); try { bool fEnforce = EnforceConstraints; EnforceConstraints = false; for (int i = 0; i < Tables.Count; i++) { Tables[i].RejectChanges(); } EnforceConstraints = fEnforce; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } /// <summary> /// Resets the dataSet back to it's original state. Subclasses should override /// to restore back to it's original state. 
/// </summary> public virtual void Reset() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Reset|API> {0}", ObjectID); try { for (int i = 0; i < Tables.Count; i++) { ConstraintCollection cons = Tables[i].Constraints; for (int j = 0; j < cons.Count;) { if (cons[j] is ForeignKeyConstraint) { cons.Remove(cons[j]); } else { j++; } } } Clear(); Relations.Clear(); Tables.Clear(); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal bool ValidateCaseConstraint() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ValidateCaseConstraint|INFO> {0}", ObjectID); try { DataRelation? relation = null; for (int i = 0; i < Relations.Count; i++) { relation = Relations[i]; if (relation.ChildTable.CaseSensitive != relation.ParentTable.CaseSensitive) { return false; } } ForeignKeyConstraint? constraint; ConstraintCollection? constraints; for (int i = 0; i < Tables.Count; i++) { constraints = Tables[i].Constraints; for (int j = 0; j < constraints.Count; j++) { if (constraints[j] is ForeignKeyConstraint) { constraint = (ForeignKeyConstraint)constraints[j]; if (constraint.Table!.CaseSensitive != constraint.RelatedTable.CaseSensitive) { return false; } } } } return true; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal bool ValidateLocaleConstraint() { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.ValidateLocaleConstraint|INFO> {0}", ObjectID); try { DataRelation? relation = null; for (int i = 0; i < Relations.Count; i++) { relation = Relations[i]; if (relation.ChildTable.Locale.LCID != relation.ParentTable.Locale.LCID) { return false; } } ForeignKeyConstraint? constraint; ConstraintCollection? constraints; for (int i = 0; i < Tables.Count; i++) { constraints = Tables[i].Constraints; for (int j = 0; j < constraints.Count; j++) { if (constraints[j] is ForeignKeyConstraint) { constraint = (ForeignKeyConstraint)constraints[j]; if (constraint.Table!.Locale.LCID != constraint.RelatedTable.Locale.LCID) { return false; } } } } return true; } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } // SDUB: may be better to rewrite this as nonrecursive? internal DataTable? FindTable(DataTable? baseTable, PropertyDescriptor[] props, int propStart) { if (props.Length < propStart + 1) { return baseTable; } PropertyDescriptor currentProp = props[propStart]; if (baseTable == null) { // the accessor is the table name. if we don't find it, return null. if (currentProp is DataTablePropertyDescriptor) { return FindTable(((DataTablePropertyDescriptor)currentProp).Table, props, propStart + 1); } return null; } if (currentProp is DataRelationPropertyDescriptor) { return FindTable(((DataRelationPropertyDescriptor)currentProp).Relation.ChildTable, props, propStart + 1); } return null; } [RequiresUnreferencedCode(RequiresUnreferencedCodeMessage)] protected virtual void ReadXmlSerializable(XmlReader reader) { // <DataSet xsi:nil="true"> does not mean DataSet is null,but it does not have any child // so dont do anything, ignore the attributes and just return empty DataSet; _useDataSetSchemaOnly = false; _udtIsWrapped = false; if (reader.HasAttributes) { const string xsinill = Keywords.XSI + ":" + Keywords.XSI_NIL; if (reader.MoveToAttribute(xsinill)) { string? 
nilAttrib = reader.GetAttribute(xsinill); if (string.Equals(nilAttrib, "true", StringComparison.Ordinal)) { // case sensitive true comparison MoveToElement(reader, 1); return; } } const string UseDataSetSchemaOnlyString = Keywords.MSD + ":" + Keywords.USEDATASETSCHEMAONLY; if (reader.MoveToAttribute(UseDataSetSchemaOnlyString)) { string? useDataSetSchemaOnly = reader.GetAttribute(UseDataSetSchemaOnlyString); if (string.Equals(useDataSetSchemaOnly, "true", StringComparison.Ordinal) || string.Equals(useDataSetSchemaOnly, "1", StringComparison.Ordinal)) { _useDataSetSchemaOnly = true; } else if (!string.Equals(useDataSetSchemaOnly, "false", StringComparison.Ordinal) && !string.Equals(useDataSetSchemaOnly, "0", StringComparison.Ordinal)) { throw ExceptionBuilder.InvalidAttributeValue(Keywords.USEDATASETSCHEMAONLY, useDataSetSchemaOnly!); } } const string udtIsWrappedString = Keywords.MSD + ":" + Keywords.UDTCOLUMNVALUEWRAPPED; if (reader.MoveToAttribute(udtIsWrappedString)) { string? _udtIsWrappedString = reader.GetAttribute(udtIsWrappedString); if (string.Equals(_udtIsWrappedString, "true", StringComparison.Ordinal) || string.Equals(_udtIsWrappedString, "1", StringComparison.Ordinal)) { _udtIsWrapped = true; } else if (!string.Equals(_udtIsWrappedString, "false", StringComparison.Ordinal) && !string.Equals(_udtIsWrappedString, "0", StringComparison.Ordinal)) { throw ExceptionBuilder.InvalidAttributeValue(Keywords.UDTCOLUMNVALUEWRAPPED, _udtIsWrappedString!); } } } ReadXml(reader, XmlReadMode.DiffGram, true); } protected virtual System.Xml.Schema.XmlSchema? GetSchemaSerializable() => null; public static XmlSchemaComplexType GetDataSetSchema(XmlSchemaSet? schemaSet) { // For performance reasons we are exploiting the fact that config files content is constant // for a given appdomain so we can safely cache the prepared schema complex type and reuse it if (s_schemaTypeForWSDL == null) { // to change the config file, appdomain needs to restart; so it seems safe to cache the schema XmlSchemaComplexType tempWSDL = new XmlSchemaComplexType(); XmlSchemaSequence sequence = new XmlSchemaSequence(); XmlSchemaAny any = new XmlSchemaAny(); any.Namespace = XmlSchema.Namespace; any.MinOccurs = 0; any.ProcessContents = XmlSchemaContentProcessing.Lax; sequence.Items.Add(any); any = new XmlSchemaAny(); any.Namespace = Keywords.DFFNS; any.MinOccurs = 0; // when recognizing WSDL - MinOccurs="0" denotes DataSet, a MinOccurs="1" for DataTable any.ProcessContents = XmlSchemaContentProcessing.Lax; sequence.Items.Add(any); sequence.MaxOccurs = decimal.MaxValue; tempWSDL.Particle = sequence; s_schemaTypeForWSDL = tempWSDL; } return s_schemaTypeForWSDL; } private static bool PublishLegacyWSDL() => false; XmlSchema? IXmlSerializable.GetSchema() { if (GetType() == typeof(DataSet)) { return null; } MemoryStream stream = new MemoryStream(); // WriteXmlSchema(new XmlTextWriter(stream, null)); XmlWriter writer = new XmlTextWriter(stream, null); if (writer != null) { #pragma warning disable IL2026 // suppressed in ILLink.Suppressions.LibraryBuild.xml WriteXmlSchema(this, writer); #pragma warning restore IL2026 } stream.Position = 0; return XmlSchema.Read(new XmlTextReader(stream), null); } [RequiresUnreferencedCode("DataSet.GetSchema uses TypeDescriptor and XmlSerialization underneath which are not trimming safe. 
Members from serialized types may be trimmed if not referenced directly.")] private static void WriteXmlSchema(DataSet ds, XmlWriter writer) { (new XmlTreeGen(SchemaFormat.WebService)).Save(ds, writer); } void IXmlSerializable.ReadXml(XmlReader reader) { bool fNormalization = true; XmlTextReader? xmlTextReader = null; IXmlTextParser? xmlTextParser = reader as IXmlTextParser; if (xmlTextParser != null) { fNormalization = xmlTextParser.Normalized; xmlTextParser.Normalized = false; } else { xmlTextReader = reader as XmlTextReader; if (xmlTextReader != null) { fNormalization = xmlTextReader.Normalization; xmlTextReader.Normalization = false; } } #pragma warning disable IL2026 // suppressed in ILLink.Suppressions.LibraryBuild.xml ReadXmlSerializableInternal(reader); #pragma warning restore IL2026 if (xmlTextParser != null) { xmlTextParser.Normalized = fNormalization; } else if (xmlTextReader != null) { xmlTextReader.Normalization = fNormalization; } } [RequiresUnreferencedCode("DataSet.ReadXml uses XmlSerialization underneath which is not trimming safe. Members from serialized types may be trimmed if not referenced directly.")] private void ReadXmlSerializableInternal(XmlReader reader) { ReadXmlSerializable(reader); } void IXmlSerializable.WriteXml(XmlWriter writer) { #pragma warning disable IL2026 // suppressed in ILLink.Suppressions.LibraryBuild.xml WriteXmlInternal(writer); #pragma warning restore IL2026 } [RequiresUnreferencedCode("DataSet.WriteXml uses XmlSerialization underneath which is not trimming safe. Members from serialized types may be trimmed if not referenced directly.")] private void WriteXmlInternal(XmlWriter writer) { WriteXmlSchema(writer, SchemaFormat.WebService, null); WriteXml(writer, XmlWriteMode.DiffGram); } [RequiresUnreferencedCode("Using LoadOption may cause members from types used in the expression column to be trimmed if not referenced directly.")] public virtual void Load(IDataReader reader, LoadOption loadOption, FillErrorEventHandler? errorHandler, params DataTable[] tables) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.Load|API> reader, loadOption={0}", loadOption); try { foreach (DataTable dt in tables) { ADP.CheckArgumentNull(dt, nameof(tables)); if (dt.DataSet != this) { throw ExceptionBuilder.TableNotInTheDataSet(dt.TableName); } } var adapter = new LoadAdapter(); adapter.FillLoadOption = loadOption; adapter.MissingSchemaAction = MissingSchemaAction.AddWithKey; if (null != errorHandler) { adapter.FillError += errorHandler; } adapter.FillFromReader(tables, reader, 0, 0); if (!reader.IsClosed && !reader.NextResult()) { reader.Close(); } } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } [RequiresUnreferencedCode("Using LoadOption may cause members from types used in the expression column to be trimmed if not referenced directly.")] public void Load(IDataReader reader, LoadOption loadOption, params DataTable[] tables) => Load(reader, loadOption, null, tables); [RequiresUnreferencedCode("Using LoadOption may cause members from types used in the expression column to be trimmed if not referenced directly.")] public void Load(IDataReader reader, LoadOption loadOption, params string[] tables) { ADP.CheckArgumentNull(tables, nameof(tables)); var dataTables = new DataTable[tables.Length]; for (int i = 0; i < tables.Length; i++) { DataTable? 
tempDT = Tables[tables[i]]; if (null == tempDT) { tempDT = new DataTable(tables[i]); Tables.Add(tempDT); } dataTables[i] = tempDT; } Load(reader, loadOption, null, dataTables); } public DataTableReader CreateDataReader() { if (Tables.Count == 0) { throw ExceptionBuilder.CannotCreateDataReaderOnEmptyDataSet(); } var dataTables = new DataTable[Tables.Count]; for (int i = 0; i < Tables.Count; i++) { dataTables[i] = Tables[i]; } return CreateDataReader(dataTables); } public DataTableReader CreateDataReader(params DataTable[] dataTables) { long logScopeId = DataCommonEventSource.Log.EnterScope("<ds.DataSet.GetDataReader|API> {0}", ObjectID); try { if (dataTables.Length == 0) { throw ExceptionBuilder.DataTableReaderArgumentIsEmpty(); } for (int i = 0; i < dataTables.Length; i++) { if (dataTables[i] == null) { throw ExceptionBuilder.ArgumentContainsNullValue(); } } return new DataTableReader(dataTables); } finally { DataCommonEventSource.Log.ExitScope(logScopeId); } } internal string MainTableName { get { return _mainTableName; } set { _mainTableName = value; } } internal int ObjectID => _objectID; } }
-1
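The DataSet code in the record above revolves around XML round-tripping via the WriteXml/ReadXml overloads and their XmlWriteMode/XmlReadMode arguments. A minimal sketch of how those overloads compose, using only members visible in the record; the table shape and file name here are hypothetical, not taken from the record:

// Hypothetical round-trip built on WriteXml(string, XmlWriteMode)
// and ReadXml(string, XmlReadMode), both shown in the record above.
using System.Data;

var ds = new DataSet("Sample");
DataTable table = ds.Tables.Add("Items");
table.Columns.Add("Id", typeof(int));
table.Rows.Add(1);

// Persist schema plus data, then reload into a fresh DataSet.
ds.WriteXml("items.xml", XmlWriteMode.WriteSchema);

var reloaded = new DataSet();
reloaded.ReadXml("items.xml", XmlReadMode.ReadSchema);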
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack slots that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack slots that will be passed to the callee.
./src/libraries/System.Security.Cryptography/tests/HmacTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Threading; using System.Threading.Tasks; using Test.Cryptography; using Xunit; namespace System.Security.Cryptography.Tests { [SkipOnPlatform(TestPlatforms.Browser, "Not supported on Browser")] public abstract class HmacTests { // RFC2202 defines the test vectors for HMACMD5 and HMACSHA1 // RFC4231 defines the test vectors for HMACSHA{224,256,384,512} // They share the same datasets for cases 1-5, but cases 6 and 7 differ. private readonly byte[][] _testKeys; private readonly byte[][] _testData; private readonly byte[][] _testMacs; protected HmacTests(byte[][] testKeys, byte[][] testData, byte[][] testMacs) { _testKeys = testKeys; _testData = testData; _testMacs = testMacs; } protected abstract HMAC Create(); protected abstract HashAlgorithm CreateHashAlgorithm(); protected abstract byte[] HashDataOneShot(byte[] key, byte[] source); protected abstract byte[] HashDataOneShot(ReadOnlySpan<byte> key, ReadOnlySpan<byte> source); protected abstract int HashDataOneShot(ReadOnlySpan<byte> key, ReadOnlySpan<byte> source, Span<byte> destination); protected abstract bool TryHashDataOneShot(ReadOnlySpan<byte> key, ReadOnlySpan<byte> source, Span<byte> destination, out int written); protected abstract byte[] HashDataOneShot(ReadOnlySpan<byte> key, Stream source); protected abstract byte[] HashDataOneShot(byte[] key, Stream source); protected abstract int HashDataOneShot(ReadOnlySpan<byte> key, Stream source, Span<byte> destination); protected abstract ValueTask<int> HashDataOneShotAsync( ReadOnlyMemory<byte> key, Stream source, Memory<byte> destination, CancellationToken cancellationToken); protected abstract ValueTask<byte[]> HashDataOneShotAsync( ReadOnlyMemory<byte> key, Stream source, CancellationToken cancellationToken); protected abstract ValueTask<byte[]> HashDataOneShotAsync( byte[] key, Stream source, CancellationToken cancellationToken); protected abstract int BlockSize { get; } protected abstract int MacSize { get; } protected void VerifyRepeating(string input, int repeatCount, string hexKey, string output) { byte[] key = ByteUtils.HexToByteArray(hexKey); using (Stream stream = new DataRepeatingStream(input, repeatCount)) { VerifyHashDataStreamAllocating(key, stream, output, spanKey: true); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { VerifyHashDataStreamAllocating(key, stream, output, spanKey: false); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { VerifyHashDataStream(key, stream, output); } } protected async Task VerifyRepeatingAsync(string input, int repeatCount, string hexKey, string output) { byte[] key = ByteUtils.HexToByteArray(hexKey); using (Stream stream = new DataRepeatingStream(input, repeatCount)) { await VerifyHashDataStreamAllocatingAsync(key, stream, output, memoryKey: true); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { await VerifyHashDataStreamAllocatingAsync(key, stream, output, memoryKey: false); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { await VerifyHashDataStreamAsync(key, stream, output); } } protected void VerifyHashDataStream(ReadOnlySpan<byte> key, Stream stream, string output) { Span<byte> destination = stackalloc byte[MacSize]; byte[] expected = ByteUtils.HexToByteArray(output); int written = HashDataOneShot(key, stream, destination); Assert.Equal(MacSize, written); 
AssertExtensions.SequenceEqual(expected, destination); } protected async Task VerifyHashDataStreamAsync(ReadOnlyMemory<byte> key, Stream stream, string output) { Memory<byte> destination = new byte[MacSize]; byte[] expected = ByteUtils.HexToByteArray(output); int written = await HashDataOneShotAsync(key, stream, destination, cancellationToken: default); Assert.Equal(MacSize, written); AssertExtensions.SequenceEqual(expected, destination.Span); } protected void VerifyHashDataStreamAllocating(byte[] key, Stream stream, string output, bool spanKey) { byte[] expected = ByteUtils.HexToByteArray(output); byte[] hmac; if (spanKey) { hmac = HashDataOneShot(key.AsSpan(), stream); } else { hmac = HashDataOneShot(key, stream); } Assert.Equal(expected, hmac); } protected async Task VerifyHashDataStreamAllocatingAsync(byte[] key, Stream stream, string output, bool memoryKey) { byte[] expected = ByteUtils.HexToByteArray(output); byte[] hmac; if (memoryKey) { hmac = await HashDataOneShotAsync(new ReadOnlyMemory<byte>(key), stream, cancellationToken: default); } else { hmac = await HashDataOneShotAsync(key, stream, cancellationToken: default); } Assert.Equal(expected, hmac); } protected void VerifyHmac(int testCaseId, byte[] digestBytes) { byte[] data = _testData[testCaseId]; byte[] computedDigest; int truncateSize = digestBytes.Length; AssertExtensions.LessThanOrEqualTo(truncateSize, MacSize); using (HMAC hmac = Create()) { Assert.Equal(MacSize, hmac.HashSize / 8); byte[] key = (byte[])_testKeys[testCaseId].Clone(); hmac.Key = key; // make sure the getter returns different objects each time Assert.NotSame(key, hmac.Key); Assert.NotSame(hmac.Key, hmac.Key); // make sure the setter didn't cache the exact object we passed in key[0] = (byte)(key[0] + 1); Assert.NotEqual<byte>(key, hmac.Key); computedDigest = hmac.ComputeHash(data); } computedDigest = Truncate(computedDigest, truncateSize); Assert.Equal(digestBytes, computedDigest); using (HMAC hmac = Create()) { byte[] key = (byte[])_testKeys[testCaseId].Clone(); hmac.Key = key; hmac.TransformBlock(data, 0, data.Length, null, 0); hmac.Initialize(); hmac.TransformBlock(data, 0, data.Length, null, 0); hmac.TransformFinalBlock(Array.Empty<byte>(), 0, 0); computedDigest = hmac.Hash; } computedDigest = Truncate(computedDigest, truncateSize); Assert.Equal(digestBytes, computedDigest); // One shot - allocating and byte array inputs computedDigest = HashDataOneShot(_testKeys[testCaseId], data); computedDigest = Truncate(computedDigest, truncateSize); Assert.Equal(digestBytes, computedDigest); static byte[] Truncate(byte[] digest, int truncateSize) { if (truncateSize == -1) return digest; return digest.AsSpan(0, truncateSize).ToArray(); } } protected void VerifyHmac_KeyAlreadySet( HMAC hmac, int testCaseId, string digest) { byte[] digestBytes = ByteUtils.HexToByteArray(digest); byte[] computedDigest; computedDigest = hmac.ComputeHash(_testData[testCaseId]); Assert.Equal(digestBytes, computedDigest); } protected void VerifyHmacRfc2104_2() { // Ensure that keys shorter than the threshold don't get altered. using (HMAC hmac = Create()) { byte[] key = new byte[BlockSize]; hmac.Key = key; byte[] retrievedKey = hmac.Key; Assert.Equal<byte>(key, retrievedKey); } // Ensure that keys longer than the threshold are adjusted via Rfc2104 Section 2. 
using (HMAC hmac = Create()) { byte[] overSizedKey = new byte[BlockSize + 1]; hmac.Key = overSizedKey; byte[] actualKey = hmac.Key; byte[] expectedKey = CreateHashAlgorithm().ComputeHash(overSizedKey); Assert.Equal<byte>(expectedKey, actualKey); // Also ensure that the hashing operation uses the adjusted key. byte[] data = new byte[100]; hmac.Key = expectedKey; byte[] expectedHash = hmac.ComputeHash(data); hmac.Key = overSizedKey; byte[] actualHash = hmac.ComputeHash(data); Assert.Equal<byte>(expectedHash, actualHash); } } [Fact] public void InvalidInput_Null() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentNullException>("buffer", () => hash.ComputeHash((byte[])null)); AssertExtensions.Throws<ArgumentNullException>("buffer", () => hash.ComputeHash(null, 0, 0)); Assert.Throws<NullReferenceException>(() => hash.ComputeHash((Stream)null)); } } [Fact] public void InvalidInput_NegativeOffset() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentOutOfRangeException>("offset", () => hash.ComputeHash(Array.Empty<byte>(), -1, 0)); } } [Fact] public void InvalidInput_NegativeCount() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(Array.Empty<byte>(), 0, -1)); } } [Fact] public void InvalidInput_TooBigOffset() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(Array.Empty<byte>(), 1, 0)); } } [Fact] public void InvalidInput_TooBigCount() { byte[] nonEmpty = new byte[53]; using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(nonEmpty, 0, nonEmpty.Length + 1)); AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(nonEmpty, 1, nonEmpty.Length)); AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(nonEmpty, 2, nonEmpty.Length - 1)); AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(Array.Empty<byte>(), 0, 1)); } } [Fact] public void BoundaryCondition_Count0() { byte[] nonEmpty = new byte[53]; using (HMAC hash = Create()) { byte[] emptyHash = hash.ComputeHash(Array.Empty<byte>()); byte[] shouldBeEmptyHash = hash.ComputeHash(nonEmpty, nonEmpty.Length, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); shouldBeEmptyHash = hash.ComputeHash(nonEmpty, 0, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); nonEmpty[0] = 0xFF; nonEmpty[nonEmpty.Length - 1] = 0x77; shouldBeEmptyHash = hash.ComputeHash(nonEmpty, nonEmpty.Length, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); shouldBeEmptyHash = hash.ComputeHash(nonEmpty, 0, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); } } [Fact] public void OffsetAndCountRespected() { byte[] dataA = { 1, 1, 2, 3, 5, 8 }; byte[] dataB = { 0, 1, 1, 2, 3, 5, 8, 13 }; using (HMAC hash = Create()) { byte[] baseline = hash.ComputeHash(dataA); // Skip the 0 byte, and stop short of the 13. 
byte[] offsetData = hash.ComputeHash(dataB, 1, dataA.Length); Assert.Equal(baseline, offsetData); } } [Fact] public void InvalidKey_ThrowArgumentNullException() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentNullException>("value", () => hash.Key = null); } } [Fact] public void OneShot_NullKey_ArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("key", () => HashDataOneShot(key: (byte[])null, source: Array.Empty<byte>())); } [Fact] public void OneShot_NullSource_ArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("source", () => HashDataOneShot(key: Array.Empty<byte>(), source: (byte[])null)); } [Fact] public void OneShot_ExistingBuffer_TooSmall() { byte[] buffer = new byte[MacSize - 1]; byte[] key = _testKeys[1]; byte[] data = _testData[1]; AssertExtensions.Throws<ArgumentException>("destination", () => HashDataOneShot(key, data, buffer)); AssertExtensions.FilledWith<byte>(0, buffer); } [Fact] public void OneShot_TryExistingBuffer_TooSmall() { byte[] buffer = new byte[MacSize - 1]; byte[] key = _testKeys[1]; byte[] data = _testData[1]; Assert.False(TryHashDataOneShot(key, data, buffer, out int written)); Assert.Equal(0, written); AssertExtensions.FilledWith<byte>(0, buffer); } [Fact] public void OneShot_TryExistingBuffer_Exact() { for (int caseId = 1; caseId <= 7; caseId++) { byte[] buffer = new byte[MacSize]; byte[] key = _testKeys[caseId]; byte[] data = _testData[caseId]; Assert.True(TryHashDataOneShot(key, data, buffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedBuffer = buffer.AsSpan(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedBuffer); } } [Fact] public void OneShot_TryExistingBuffer_Larger() { for (int caseId = 1; caseId <= 7; caseId++) { Span<byte> buffer = new byte[MacSize + 20]; byte[] key = _testKeys[caseId]; byte[] data = _testData[caseId]; buffer.Fill(0xCC); Span<byte> writeBuffer = buffer.Slice(10, MacSize); Assert.True(TryHashDataOneShot(key, data, writeBuffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedWriteBuffer = writeBuffer.Slice(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedWriteBuffer); AssertExtensions.FilledWith<byte>(0xCC, buffer[..10]); AssertExtensions.FilledWith<byte>(0xCC, buffer[^10..]); } } [Theory] [InlineData(0, 10)] [InlineData(10, 10)] [InlineData(10, 0)] [InlineData(10, 20)] public void OneShot_TryExistingBuffer_OverlapsKey(int keyOffset, int bufferOffset) { for (int caseId = 1; caseId <= 7; caseId++) { byte[] key = _testKeys[caseId]; byte[] data = _testData[caseId]; Span<byte> buffer = new byte[Math.Max(key.Length, MacSize) + Math.Max(keyOffset, bufferOffset)]; Span<byte> writeBuffer = buffer.Slice(bufferOffset, MacSize); Span<byte> keyBuffer = buffer.Slice(keyOffset, key.Length); key.AsSpan().CopyTo(keyBuffer); Assert.True(TryHashDataOneShot(keyBuffer, data, writeBuffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedWriteBuffer = writeBuffer.Slice(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedWriteBuffer); } } [Theory] [InlineData(0, 10)] [InlineData(10, 10)] [InlineData(10, 0)] [InlineData(10, 20)] public void OneShot_TryExistingBuffer_OverlapsSource(int sourceOffset, int bufferOffset) { for (int caseId = 1; caseId <= 7; caseId++) { byte[] key = _testKeys[caseId]; 
byte[] data = _testData[caseId]; Span<byte> buffer = new byte[Math.Max(data.Length, MacSize) + Math.Max(sourceOffset, bufferOffset)]; Span<byte> writeBuffer = buffer.Slice(bufferOffset, MacSize); Span<byte> dataBuffer = buffer.Slice(sourceOffset, data.Length); data.AsSpan().CopyTo(dataBuffer); Assert.True(TryHashDataOneShot(key, dataBuffer, writeBuffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedWriteBuffer = writeBuffer.Slice(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedWriteBuffer); } } [Theory] [InlineData(new byte[0], new byte[] { 1 })] [InlineData(new byte[] { 1 }, new byte[0])] public void OneShot_Empty_Matches_Instances(byte[] key, byte[] source) { using (HMAC hash = Create()) { hash.Key = key; byte[] mac = hash.ComputeHash(source, 0, source.Length); byte[] oneShot = HashDataOneShot(key, source); Assert.Equal(mac, oneShot); } } [Fact] public void HashData_Stream_Source_Null() { AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShot(ReadOnlySpan<byte>.Empty, (Stream)null)); AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShot(Array.Empty<byte>(), (Stream)null)); } [Fact] public void HashData_Stream_Source_Null_Async() { AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, (Stream)null, default)); AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShotAsync(Array.Empty<byte>(), (Stream)null, default)); } [Fact] public void HashData_Stream_ByteKey_Null() { AssertExtensions.Throws<ArgumentNullException>( "key", () => HashDataOneShot((byte[])null, Stream.Null)); } [Fact] public void HashData_Stream_ByteKey_Null_Async() { AssertExtensions.Throws<ArgumentNullException>( "key", () => HashDataOneShotAsync((byte[])null, Stream.Null, default)); } [Fact] public void HashData_Stream_DestinationTooSmall() { byte[] destination = new byte[MacSize - 1]; AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShot(Array.Empty<byte>(), Stream.Null, destination)); AssertExtensions.FilledWith<byte>(0, destination); AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShot(ReadOnlySpan<byte>.Empty, Stream.Null, destination)); AssertExtensions.FilledWith<byte>(0, destination); } [Fact] public void HashData_Stream_DestinationTooSmall_Async() { byte[] destination = new byte[MacSize - 1]; AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShotAsync(Array.Empty<byte>(), Stream.Null, destination, default)); AssertExtensions.FilledWith<byte>(0, destination); AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, Stream.Null, destination, default)); AssertExtensions.FilledWith<byte>(0, destination); } [Fact] public void HashData_Stream_NotReadable() { AssertExtensions.Throws<ArgumentException>( "source", () => HashDataOneShot(Array.Empty<byte>(), UntouchableStream.Instance)); AssertExtensions.Throws<ArgumentException>( "source", () => HashDataOneShot(ReadOnlySpan<byte>.Empty, UntouchableStream.Instance)); } [Fact] public void HashData_Stream_Cancelled() { Memory<byte> buffer = new byte[512 / 8]; CancellationToken cancelledToken = new CancellationToken(canceled: true); ValueTask<int> waitable = HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, Stream.Null, buffer, cancelledToken); Assert.True(waitable.IsCanceled, nameof(waitable.IsCanceled)); 
AssertExtensions.FilledWith<byte>(0, buffer.Span); waitable = HashDataOneShotAsync(Array.Empty<byte>(), Stream.Null, buffer, cancelledToken); Assert.True(waitable.IsCanceled, nameof(waitable.IsCanceled)); AssertExtensions.FilledWith<byte>(0, buffer.Span); } [Fact] public void HashData_Stream_Allocating_Cancelled() { CancellationToken cancelledToken = new CancellationToken(canceled: true); ValueTask<byte[]> waitable = HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, Stream.Null, cancelledToken); Assert.True(waitable.IsCanceled, nameof(waitable.IsCanceled)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.IO; using System.Threading; using System.Threading.Tasks; using Test.Cryptography; using Xunit; namespace System.Security.Cryptography.Tests { [SkipOnPlatform(TestPlatforms.Browser, "Not supported on Browser")] public abstract class HmacTests { // RFC2202 defines the test vectors for HMACMD5 and HMACSHA1 // RFC4231 defines the test vectors for HMACSHA{224,256,384,512} // They share the same datasets for cases 1-5, but cases 6 and 7 differ. private readonly byte[][] _testKeys; private readonly byte[][] _testData; private readonly byte[][] _testMacs; protected HmacTests(byte[][] testKeys, byte[][] testData, byte[][] testMacs) { _testKeys = testKeys; _testData = testData; _testMacs = testMacs; } protected abstract HMAC Create(); protected abstract HashAlgorithm CreateHashAlgorithm(); protected abstract byte[] HashDataOneShot(byte[] key, byte[] source); protected abstract byte[] HashDataOneShot(ReadOnlySpan<byte> key, ReadOnlySpan<byte> source); protected abstract int HashDataOneShot(ReadOnlySpan<byte> key, ReadOnlySpan<byte> source, Span<byte> destination); protected abstract bool TryHashDataOneShot(ReadOnlySpan<byte> key, ReadOnlySpan<byte> source, Span<byte> destination, out int written); protected abstract byte[] HashDataOneShot(ReadOnlySpan<byte> key, Stream source); protected abstract byte[] HashDataOneShot(byte[] key, Stream source); protected abstract int HashDataOneShot(ReadOnlySpan<byte> key, Stream source, Span<byte> destination); protected abstract ValueTask<int> HashDataOneShotAsync( ReadOnlyMemory<byte> key, Stream source, Memory<byte> destination, CancellationToken cancellationToken); protected abstract ValueTask<byte[]> HashDataOneShotAsync( ReadOnlyMemory<byte> key, Stream source, CancellationToken cancellationToken); protected abstract ValueTask<byte[]> HashDataOneShotAsync( byte[] key, Stream source, CancellationToken cancellationToken); protected abstract int BlockSize { get; } protected abstract int MacSize { get; } protected void VerifyRepeating(string input, int repeatCount, string hexKey, string output) { byte[] key = ByteUtils.HexToByteArray(hexKey); using (Stream stream = new DataRepeatingStream(input, repeatCount)) { VerifyHashDataStreamAllocating(key, stream, output, spanKey: true); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { VerifyHashDataStreamAllocating(key, stream, output, spanKey: false); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { VerifyHashDataStream(key, stream, output); } } protected async Task VerifyRepeatingAsync(string input, int repeatCount, string hexKey, string output) { byte[] key = ByteUtils.HexToByteArray(hexKey); using (Stream stream = new DataRepeatingStream(input, repeatCount)) { await VerifyHashDataStreamAllocatingAsync(key, stream, output, memoryKey: true); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { await VerifyHashDataStreamAllocatingAsync(key, stream, output, memoryKey: false); } using (Stream stream = new DataRepeatingStream(input, repeatCount)) { await VerifyHashDataStreamAsync(key, stream, output); } } protected void VerifyHashDataStream(ReadOnlySpan<byte> key, Stream stream, string output) { Span<byte> destination = stackalloc byte[MacSize]; byte[] expected = ByteUtils.HexToByteArray(output); int written = HashDataOneShot(key, stream, destination); Assert.Equal(MacSize, written); 
AssertExtensions.SequenceEqual(expected, destination); } protected async Task VerifyHashDataStreamAsync(ReadOnlyMemory<byte> key, Stream stream, string output) { Memory<byte> destination = new byte[MacSize]; byte[] expected = ByteUtils.HexToByteArray(output); int written = await HashDataOneShotAsync(key, stream, destination, cancellationToken: default); Assert.Equal(MacSize, written); AssertExtensions.SequenceEqual(expected, destination.Span); } protected void VerifyHashDataStreamAllocating(byte[] key, Stream stream, string output, bool spanKey) { byte[] expected = ByteUtils.HexToByteArray(output); byte[] hmac; if (spanKey) { hmac = HashDataOneShot(key.AsSpan(), stream); } else { hmac = HashDataOneShot(key, stream); } Assert.Equal(expected, hmac); } protected async Task VerifyHashDataStreamAllocatingAsync(byte[] key, Stream stream, string output, bool memoryKey) { byte[] expected = ByteUtils.HexToByteArray(output); byte[] hmac; if (memoryKey) { hmac = await HashDataOneShotAsync(new ReadOnlyMemory<byte>(key), stream, cancellationToken: default); } else { hmac = await HashDataOneShotAsync(key, stream, cancellationToken: default); } Assert.Equal(expected, hmac); } protected void VerifyHmac(int testCaseId, byte[] digestBytes) { byte[] data = _testData[testCaseId]; byte[] computedDigest; int truncateSize = digestBytes.Length; AssertExtensions.LessThanOrEqualTo(truncateSize, MacSize); using (HMAC hmac = Create()) { Assert.Equal(MacSize, hmac.HashSize / 8); byte[] key = (byte[])_testKeys[testCaseId].Clone(); hmac.Key = key; // make sure the getter returns different objects each time Assert.NotSame(key, hmac.Key); Assert.NotSame(hmac.Key, hmac.Key); // make sure the setter didn't cache the exact object we passed in key[0] = (byte)(key[0] + 1); Assert.NotEqual<byte>(key, hmac.Key); computedDigest = hmac.ComputeHash(data); } computedDigest = Truncate(computedDigest, truncateSize); Assert.Equal(digestBytes, computedDigest); using (HMAC hmac = Create()) { byte[] key = (byte[])_testKeys[testCaseId].Clone(); hmac.Key = key; hmac.TransformBlock(data, 0, data.Length, null, 0); hmac.Initialize(); hmac.TransformBlock(data, 0, data.Length, null, 0); hmac.TransformFinalBlock(Array.Empty<byte>(), 0, 0); computedDigest = hmac.Hash; } computedDigest = Truncate(computedDigest, truncateSize); Assert.Equal(digestBytes, computedDigest); // One shot - allocating and byte array inputs computedDigest = HashDataOneShot(_testKeys[testCaseId], data); computedDigest = Truncate(computedDigest, truncateSize); Assert.Equal(digestBytes, computedDigest); static byte[] Truncate(byte[] digest, int truncateSize) { if (truncateSize == -1) return digest; return digest.AsSpan(0, truncateSize).ToArray(); } } protected void VerifyHmac_KeyAlreadySet( HMAC hmac, int testCaseId, string digest) { byte[] digestBytes = ByteUtils.HexToByteArray(digest); byte[] computedDigest; computedDigest = hmac.ComputeHash(_testData[testCaseId]); Assert.Equal(digestBytes, computedDigest); } protected void VerifyHmacRfc2104_2() { // Ensure that keys shorter than the threshold don't get altered. using (HMAC hmac = Create()) { byte[] key = new byte[BlockSize]; hmac.Key = key; byte[] retrievedKey = hmac.Key; Assert.Equal<byte>(key, retrievedKey); } // Ensure that keys longer than the threshold are adjusted via Rfc2104 Section 2. 
using (HMAC hmac = Create()) { byte[] overSizedKey = new byte[BlockSize + 1]; hmac.Key = overSizedKey; byte[] actualKey = hmac.Key; byte[] expectedKey = CreateHashAlgorithm().ComputeHash(overSizedKey); Assert.Equal<byte>(expectedKey, actualKey); // Also ensure that the hashing operation uses the adjusted key. byte[] data = new byte[100]; hmac.Key = expectedKey; byte[] expectedHash = hmac.ComputeHash(data); hmac.Key = overSizedKey; byte[] actualHash = hmac.ComputeHash(data); Assert.Equal<byte>(expectedHash, actualHash); } } [Fact] public void InvalidInput_Null() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentNullException>("buffer", () => hash.ComputeHash((byte[])null)); AssertExtensions.Throws<ArgumentNullException>("buffer", () => hash.ComputeHash(null, 0, 0)); Assert.Throws<NullReferenceException>(() => hash.ComputeHash((Stream)null)); } } [Fact] public void InvalidInput_NegativeOffset() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentOutOfRangeException>("offset", () => hash.ComputeHash(Array.Empty<byte>(), -1, 0)); } } [Fact] public void InvalidInput_NegativeCount() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(Array.Empty<byte>(), 0, -1)); } } [Fact] public void InvalidInput_TooBigOffset() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(Array.Empty<byte>(), 1, 0)); } } [Fact] public void InvalidInput_TooBigCount() { byte[] nonEmpty = new byte[53]; using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(nonEmpty, 0, nonEmpty.Length + 1)); AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(nonEmpty, 1, nonEmpty.Length)); AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(nonEmpty, 2, nonEmpty.Length - 1)); AssertExtensions.Throws<ArgumentException>(null, () => hash.ComputeHash(Array.Empty<byte>(), 0, 1)); } } [Fact] public void BoundaryCondition_Count0() { byte[] nonEmpty = new byte[53]; using (HMAC hash = Create()) { byte[] emptyHash = hash.ComputeHash(Array.Empty<byte>()); byte[] shouldBeEmptyHash = hash.ComputeHash(nonEmpty, nonEmpty.Length, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); shouldBeEmptyHash = hash.ComputeHash(nonEmpty, 0, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); nonEmpty[0] = 0xFF; nonEmpty[nonEmpty.Length - 1] = 0x77; shouldBeEmptyHash = hash.ComputeHash(nonEmpty, nonEmpty.Length, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); shouldBeEmptyHash = hash.ComputeHash(nonEmpty, 0, 0); Assert.Equal(emptyHash, shouldBeEmptyHash); } } [Fact] public void OffsetAndCountRespected() { byte[] dataA = { 1, 1, 2, 3, 5, 8 }; byte[] dataB = { 0, 1, 1, 2, 3, 5, 8, 13 }; using (HMAC hash = Create()) { byte[] baseline = hash.ComputeHash(dataA); // Skip the 0 byte, and stop short of the 13. 
byte[] offsetData = hash.ComputeHash(dataB, 1, dataA.Length); Assert.Equal(baseline, offsetData); } } [Fact] public void InvalidKey_ThrowArgumentNullException() { using (HMAC hash = Create()) { AssertExtensions.Throws<ArgumentNullException>("value", () => hash.Key = null); } } [Fact] public void OneShot_NullKey_ArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("key", () => HashDataOneShot(key: (byte[])null, source: Array.Empty<byte>())); } [Fact] public void OneShot_NullSource_ArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>("source", () => HashDataOneShot(key: Array.Empty<byte>(), source: (byte[])null)); } [Fact] public void OneShot_ExistingBuffer_TooSmall() { byte[] buffer = new byte[MacSize - 1]; byte[] key = _testKeys[1]; byte[] data = _testData[1]; AssertExtensions.Throws<ArgumentException>("destination", () => HashDataOneShot(key, data, buffer)); AssertExtensions.FilledWith<byte>(0, buffer); } [Fact] public void OneShot_TryExistingBuffer_TooSmall() { byte[] buffer = new byte[MacSize - 1]; byte[] key = _testKeys[1]; byte[] data = _testData[1]; Assert.False(TryHashDataOneShot(key, data, buffer, out int written)); Assert.Equal(0, written); AssertExtensions.FilledWith<byte>(0, buffer); } [Fact] public void OneShot_TryExistingBuffer_Exact() { for (int caseId = 1; caseId <= 7; caseId++) { byte[] buffer = new byte[MacSize]; byte[] key = _testKeys[caseId]; byte[] data = _testData[caseId]; Assert.True(TryHashDataOneShot(key, data, buffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedBuffer = buffer.AsSpan(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedBuffer); } } [Fact] public void OneShot_TryExistingBuffer_Larger() { for (int caseId = 1; caseId <= 7; caseId++) { Span<byte> buffer = new byte[MacSize + 20]; byte[] key = _testKeys[caseId]; byte[] data = _testData[caseId]; buffer.Fill(0xCC); Span<byte> writeBuffer = buffer.Slice(10, MacSize); Assert.True(TryHashDataOneShot(key, data, writeBuffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedWriteBuffer = writeBuffer.Slice(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedWriteBuffer); AssertExtensions.FilledWith<byte>(0xCC, buffer[..10]); AssertExtensions.FilledWith<byte>(0xCC, buffer[^10..]); } } [Theory] [InlineData(0, 10)] [InlineData(10, 10)] [InlineData(10, 0)] [InlineData(10, 20)] public void OneShot_TryExistingBuffer_OverlapsKey(int keyOffset, int bufferOffset) { for (int caseId = 1; caseId <= 7; caseId++) { byte[] key = _testKeys[caseId]; byte[] data = _testData[caseId]; Span<byte> buffer = new byte[Math.Max(key.Length, MacSize) + Math.Max(keyOffset, bufferOffset)]; Span<byte> writeBuffer = buffer.Slice(bufferOffset, MacSize); Span<byte> keyBuffer = buffer.Slice(keyOffset, key.Length); key.AsSpan().CopyTo(keyBuffer); Assert.True(TryHashDataOneShot(keyBuffer, data, writeBuffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedWriteBuffer = writeBuffer.Slice(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedWriteBuffer); } } [Theory] [InlineData(0, 10)] [InlineData(10, 10)] [InlineData(10, 0)] [InlineData(10, 20)] public void OneShot_TryExistingBuffer_OverlapsSource(int sourceOffset, int bufferOffset) { for (int caseId = 1; caseId <= 7; caseId++) { byte[] key = _testKeys[caseId]; 
byte[] data = _testData[caseId]; Span<byte> buffer = new byte[Math.Max(data.Length, MacSize) + Math.Max(sourceOffset, bufferOffset)]; Span<byte> writeBuffer = buffer.Slice(bufferOffset, MacSize); Span<byte> dataBuffer = buffer.Slice(sourceOffset, data.Length); data.AsSpan().CopyTo(dataBuffer); Assert.True(TryHashDataOneShot(key, dataBuffer, writeBuffer, out int written)); Assert.Equal(MacSize, written); ReadOnlySpan<byte> expectedMac = _testMacs[caseId]; Span<byte> truncatedWriteBuffer = writeBuffer.Slice(0, expectedMac.Length); AssertExtensions.SequenceEqual(expectedMac, truncatedWriteBuffer); } } [Theory] [InlineData(new byte[0], new byte[] { 1 })] [InlineData(new byte[] { 1 }, new byte[0])] public void OneShot_Empty_Matches_Instances(byte[] key, byte[] source) { using (HMAC hash = Create()) { hash.Key = key; byte[] mac = hash.ComputeHash(source, 0, source.Length); byte[] oneShot = HashDataOneShot(key, source); Assert.Equal(mac, oneShot); } } [Fact] public void HashData_Stream_Source_Null() { AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShot(ReadOnlySpan<byte>.Empty, (Stream)null)); AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShot(Array.Empty<byte>(), (Stream)null)); } [Fact] public void HashData_Stream_Source_Null_Async() { AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, (Stream)null, default)); AssertExtensions.Throws<ArgumentNullException>( "source", () => HashDataOneShotAsync(Array.Empty<byte>(), (Stream)null, default)); } [Fact] public void HashData_Stream_ByteKey_Null() { AssertExtensions.Throws<ArgumentNullException>( "key", () => HashDataOneShot((byte[])null, Stream.Null)); } [Fact] public void HashData_Stream_ByteKey_Null_Async() { AssertExtensions.Throws<ArgumentNullException>( "key", () => HashDataOneShotAsync((byte[])null, Stream.Null, default)); } [Fact] public void HashData_Stream_DestinationTooSmall() { byte[] destination = new byte[MacSize - 1]; AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShot(Array.Empty<byte>(), Stream.Null, destination)); AssertExtensions.FilledWith<byte>(0, destination); AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShot(ReadOnlySpan<byte>.Empty, Stream.Null, destination)); AssertExtensions.FilledWith<byte>(0, destination); } [Fact] public void HashData_Stream_DestinationTooSmall_Async() { byte[] destination = new byte[MacSize - 1]; AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShotAsync(Array.Empty<byte>(), Stream.Null, destination, default)); AssertExtensions.FilledWith<byte>(0, destination); AssertExtensions.Throws<ArgumentException>( "destination", () => HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, Stream.Null, destination, default)); AssertExtensions.FilledWith<byte>(0, destination); } [Fact] public void HashData_Stream_NotReadable() { AssertExtensions.Throws<ArgumentException>( "source", () => HashDataOneShot(Array.Empty<byte>(), UntouchableStream.Instance)); AssertExtensions.Throws<ArgumentException>( "source", () => HashDataOneShot(ReadOnlySpan<byte>.Empty, UntouchableStream.Instance)); } [Fact] public void HashData_Stream_Cancelled() { Memory<byte> buffer = new byte[512 / 8]; CancellationToken cancelledToken = new CancellationToken(canceled: true); ValueTask<int> waitable = HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, Stream.Null, buffer, cancelledToken); Assert.True(waitable.IsCanceled, nameof(waitable.IsCanceled)); 
AssertExtensions.FilledWith<byte>(0, buffer.Span); waitable = HashDataOneShotAsync(Array.Empty<byte>(), Stream.Null, buffer, cancelledToken); Assert.True(waitable.IsCanceled, nameof(waitable.IsCanceled)); AssertExtensions.FilledWith<byte>(0, buffer.Span); } [Fact] public void HashData_Stream_Allocating_Cancelled() { CancellationToken cancelledToken = new CancellationToken(canceled: true); ValueTask<byte[]> waitable = HashDataOneShotAsync(ReadOnlyMemory<byte>.Empty, Stream.Null, cancelledToken); Assert.True(waitable.IsCanceled, nameof(waitable.IsCanceled)); } } }
-1
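The HmacTests record above exercises both the instance-based HMAC path and the static one-shot helpers behind its abstract HashDataOneShot hooks. A minimal sketch of the two paths using HMACSHA256; the key and message bytes are made-up placeholders, and the mapping of HashDataOneShot onto the static HashData method is an assumption about the derived test classes:

using System.Security.Cryptography;
using System.Text;

byte[] key = Encoding.UTF8.GetBytes("example-key");      // placeholder key
byte[] message = Encoding.UTF8.GetBytes("hello world");  // placeholder data

// Instance path, as in VerifyHmac: set the key, then ComputeHash.
using var hmac = new HMACSHA256(key);
byte[] instanceMac = hmac.ComputeHash(message);

// One-shot static path (what the derived tests presumably plug into HashDataOneShot).
byte[] oneShotMac = HMACSHA256.HashData(key, message);

// Both paths yield the same 32-byte MAC for the same key and message.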
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack slots that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack slots that will be passed to the callee.
./src/tests/JIT/Methodical/tailcall/compat_i_u2_il_d.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="compat_i_u2.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>Full</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="compat_i_u2.il" /> </ItemGroup> </Project>
-1
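The records above trace back to PR 66,282, which enables fast tail calls on ARM32 under the conditions listed in its description. For reference, a fast tail call replaces a call-plus-return in tail position with a jump that reuses the caller's frame. A minimal, hypothetical C# shape the JIT can consider; whether it is actually optimized is a JIT decision, not a language guarantee:

// The call to Countdown is in tail position: its result is returned directly,
// so the JIT may emit it as a fast tail call when none of the blocking
// conditions from the PR description (split struct arguments, non-standard
// calling convention, overwriting outgoing stack slots) apply.
static int Countdown(int n) => n <= 0 ? 0 : Countdown(n - 1);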
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack slots that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack slots that will be passed to the callee.
./src/libraries/Common/src/Interop/Android/System.Security.Cryptography.Native.Android/Interop.Ssl.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Buffers;
using System.Collections.Generic;
using System.Net.Security;
using System.Runtime.InteropServices;
using System.Security.Authentication;
using System.Security.Cryptography.X509Certificates;
using Microsoft.Win32.SafeHandles;

using SafeSslHandle = System.Net.SafeSslHandle;

internal static partial class Interop
{
    internal static partial class AndroidCrypto
    {
        private const int UNSUPPORTED_API_LEVEL = 2;

        internal unsafe delegate PAL_SSLStreamStatus SSLReadCallback(byte* data, int* length);
        internal unsafe delegate void SSLWriteCallback(byte* data, int length);

        internal enum PAL_SSLStreamStatus
        {
            OK = 0,
            NeedData = 1,
            Error = 2,
            Renegotiate = 3,
            Closed = 4,
        };

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamCreate")]
        internal static partial SafeSslHandle SSLStreamCreate();

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamCreateWithCertificates")]
        private static partial SafeSslHandle SSLStreamCreateWithCertificates(
            ref byte pkcs8PrivateKey,
            int pkcs8PrivateKeyLen,
            PAL_KeyAlgorithm algorithm,
            IntPtr[] certs,
            int certsLen);

        internal static SafeSslHandle SSLStreamCreateWithCertificates(ReadOnlySpan<byte> pkcs8PrivateKey, PAL_KeyAlgorithm algorithm, IntPtr[] certificates)
        {
            return SSLStreamCreateWithCertificates(
                ref MemoryMarshal.GetReference(pkcs8PrivateKey),
                pkcs8PrivateKey.Length,
                algorithm,
                certificates,
                certificates.Length);
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamInitialize")]
        private static partial int SSLStreamInitializeImpl(
            SafeSslHandle sslHandle,
            [MarshalAs(UnmanagedType.U1)] bool isServer,
            SSLReadCallback streamRead,
            SSLWriteCallback streamWrite,
            int appBufferSize);

        internal static void SSLStreamInitialize(
            SafeSslHandle sslHandle,
            bool isServer,
            SSLReadCallback streamRead,
            SSLWriteCallback streamWrite,
            int appBufferSize)
        {
            int ret = SSLStreamInitializeImpl(sslHandle, isServer, streamRead, streamWrite, appBufferSize);
            if (ret != SUCCESS)
                throw new SslException();
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamSetTargetHost")]
        private static partial int SSLStreamSetTargetHostImpl(
            SafeSslHandle sslHandle,
            [MarshalAs(UnmanagedType.LPUTF8Str)] string targetHost);

        internal static void SSLStreamSetTargetHost(
            SafeSslHandle sslHandle,
            string targetHost)
        {
            int ret = SSLStreamSetTargetHostImpl(sslHandle, targetHost);
            if (ret == UNSUPPORTED_API_LEVEL)
                throw new PlatformNotSupportedException(SR.net_android_ssl_api_level_unsupported);
            else if (ret != SUCCESS)
                throw new SslException();
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamRequestClientAuthentication")]
        internal static partial void SSLStreamRequestClientAuthentication(SafeSslHandle sslHandle);

        [StructLayout(LayoutKind.Sequential)]
        private unsafe struct ApplicationProtocolData
        {
            public byte* Data;
            public int Length;
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamSetApplicationProtocols")]
        private static unsafe partial int SSLStreamSetApplicationProtocols(SafeSslHandle sslHandle, ApplicationProtocolData[] protocolData, int count);

        internal static unsafe void SSLStreamSetApplicationProtocols(SafeSslHandle sslHandle, List<SslApplicationProtocol> protocols)
        {
            int count = protocols.Count;
            MemoryHandle[] memHandles = new MemoryHandle[count];
            ApplicationProtocolData[] protocolData = new ApplicationProtocolData[count];
            try
            {
                for (int i = 0; i < count; i++)
                {
                    ReadOnlyMemory<byte> protocol = protocols[i].Protocol;
                    memHandles[i] = protocol.Pin();
                    protocolData[i] = new ApplicationProtocolData
                    {
                        Data = (byte*)memHandles[i].Pointer,
                        Length = protocol.Length
                    };
                }

                int ret = SSLStreamSetApplicationProtocols(sslHandle, protocolData, count);
                if (ret != SUCCESS)
                {
                    throw new SslException();
                }
            }
            finally
            {
                foreach (MemoryHandle memHandle in memHandles)
                {
                    memHandle.Dispose();
                }
            }
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamSetEnabledProtocols")]
        private static partial int SSLStreamSetEnabledProtocols(SafeSslHandle sslHandle, ref SslProtocols protocols, int length);

        internal static void SSLStreamSetEnabledProtocols(SafeSslHandle sslHandle, ReadOnlySpan<SslProtocols> protocols)
        {
            int ret = SSLStreamSetEnabledProtocols(sslHandle, ref MemoryMarshal.GetReference(protocols), protocols.Length);
            if (ret != SUCCESS)
                throw new SslException();
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamHandshake")]
        internal static partial PAL_SSLStreamStatus SSLStreamHandshake(SafeSslHandle sslHandle);

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetApplicationProtocol")]
        private static partial int SSLStreamGetApplicationProtocol(SafeSslHandle ssl, byte[]? buf, ref int len);

        internal static byte[]? SSLStreamGetApplicationProtocol(SafeSslHandle ssl)
        {
            int len = 0;
            int ret = SSLStreamGetApplicationProtocol(ssl, null, ref len);
            if (ret != INSUFFICIENT_BUFFER)
                return null;

            byte[] bytes = new byte[len];
            ret = SSLStreamGetApplicationProtocol(ssl, bytes, ref len);
            if (ret != SUCCESS)
                return null;

            return bytes;
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamRead")]
        private static unsafe partial PAL_SSLStreamStatus SSLStreamRead(
            SafeSslHandle sslHandle,
            byte* buffer,
            int length,
            out int bytesRead);

        internal static unsafe PAL_SSLStreamStatus SSLStreamRead(
            SafeSslHandle sslHandle,
            ReadOnlySpan<byte> buffer,
            out int bytesRead)
        {
            fixed (byte* bufferPtr = buffer)
            {
                return SSLStreamRead(sslHandle, bufferPtr, buffer.Length, out bytesRead);
            }
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamWrite")]
        private static unsafe partial PAL_SSLStreamStatus SSLStreamWrite(
            SafeSslHandle sslHandle,
            byte* buffer,
            int length);

        internal static unsafe PAL_SSLStreamStatus SSLStreamWrite(
            SafeSslHandle sslHandle,
            ReadOnlyMemory<byte> buffer)
        {
            using (MemoryHandle memHandle = buffer.Pin())
            {
                return SSLStreamWrite(sslHandle, (byte*)memHandle.Pointer, buffer.Length);
            }
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamRelease")]
        internal static partial void SSLStreamRelease(IntPtr ptr);

        internal sealed class SslException : Exception
        {
            internal SslException()
            {
            }

            internal SslException(int errorCode)
            {
                HResult = errorCode;
            }
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetProtocol")]
        private static partial int SSLStreamGetProtocol(SafeSslHandle ssl, out IntPtr protocol);

        internal static string SSLStreamGetProtocol(SafeSslHandle ssl)
        {
            IntPtr protocolPtr;
            int ret = SSLStreamGetProtocol(ssl, out protocolPtr);
            if (ret != SUCCESS)
                throw new SslException();

            if (protocolPtr == IntPtr.Zero)
                return string.Empty;

            string protocol = Marshal.PtrToStringUni(protocolPtr)!;
            Marshal.FreeHGlobal(protocolPtr);
            return protocol;
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetPeerCertificate")]
        internal static partial SafeX509Handle SSLStreamGetPeerCertificate(SafeSslHandle ssl);

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetPeerCertificates")]
        private static partial void SSLStreamGetPeerCertificates(
            SafeSslHandle ssl,
            [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 2)] out IntPtr[] certs,
            out int count);

        internal static IntPtr[]? SSLStreamGetPeerCertificates(SafeSslHandle ssl)
        {
            IntPtr[]? ptrs;
            Interop.AndroidCrypto.SSLStreamGetPeerCertificates(ssl, out ptrs, out _);
            return ptrs;
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetCipherSuite")]
        private static partial int SSLStreamGetCipherSuite(SafeSslHandle ssl, out IntPtr cipherSuite);

        internal static string SSLStreamGetCipherSuite(SafeSslHandle ssl)
        {
            IntPtr cipherSuitePtr;
            int ret = SSLStreamGetCipherSuite(ssl, out cipherSuitePtr);
            if (ret != SUCCESS)
                throw new SslException();

            if (cipherSuitePtr == IntPtr.Zero)
                return string.Empty;

            string cipherSuite = Marshal.PtrToStringUni(cipherSuitePtr)!;
            Marshal.FreeHGlobal(cipherSuitePtr);
            return cipherSuite;
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamShutdown")]
        [return: MarshalAs(UnmanagedType.U1)]
        internal static partial bool SSLStreamShutdown(SafeSslHandle ssl);

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamVerifyHostname")]
        [return: MarshalAs(UnmanagedType.U1)]
        internal static partial bool SSLStreamVerifyHostname(
            SafeSslHandle ssl,
            [MarshalAs(UnmanagedType.LPUTF8Str)] string hostname);
    }
}

namespace System.Net
{
    internal sealed class SafeSslHandle : SafeHandle
    {
        public SafeSslHandle()
            : base(IntPtr.Zero, ownsHandle: true)
        {
        }

        protected override bool ReleaseHandle()
        {
            Interop.AndroidCrypto.SSLStreamRelease(handle);
            SetHandle(IntPtr.Zero);
            return true;
        }

        public override bool IsInvalid => handle == IntPtr.Zero;
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Buffers;
using System.Collections.Generic;
using System.Net.Security;
using System.Runtime.InteropServices;
using System.Security.Authentication;
using System.Security.Cryptography.X509Certificates;
using Microsoft.Win32.SafeHandles;

using SafeSslHandle = System.Net.SafeSslHandle;

internal static partial class Interop
{
    internal static partial class AndroidCrypto
    {
        private const int UNSUPPORTED_API_LEVEL = 2;

        internal unsafe delegate PAL_SSLStreamStatus SSLReadCallback(byte* data, int* length);
        internal unsafe delegate void SSLWriteCallback(byte* data, int length);

        internal enum PAL_SSLStreamStatus
        {
            OK = 0,
            NeedData = 1,
            Error = 2,
            Renegotiate = 3,
            Closed = 4,
        };

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamCreate")]
        internal static partial SafeSslHandle SSLStreamCreate();

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamCreateWithCertificates")]
        private static partial SafeSslHandle SSLStreamCreateWithCertificates(
            ref byte pkcs8PrivateKey,
            int pkcs8PrivateKeyLen,
            PAL_KeyAlgorithm algorithm,
            IntPtr[] certs,
            int certsLen);

        internal static SafeSslHandle SSLStreamCreateWithCertificates(ReadOnlySpan<byte> pkcs8PrivateKey, PAL_KeyAlgorithm algorithm, IntPtr[] certificates)
        {
            return SSLStreamCreateWithCertificates(
                ref MemoryMarshal.GetReference(pkcs8PrivateKey),
                pkcs8PrivateKey.Length,
                algorithm,
                certificates,
                certificates.Length);
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamInitialize")]
        private static partial int SSLStreamInitializeImpl(
            SafeSslHandle sslHandle,
            [MarshalAs(UnmanagedType.U1)] bool isServer,
            SSLReadCallback streamRead,
            SSLWriteCallback streamWrite,
            int appBufferSize);

        internal static void SSLStreamInitialize(
            SafeSslHandle sslHandle,
            bool isServer,
            SSLReadCallback streamRead,
            SSLWriteCallback streamWrite,
            int appBufferSize)
        {
            int ret = SSLStreamInitializeImpl(sslHandle, isServer, streamRead, streamWrite, appBufferSize);
            if (ret != SUCCESS)
                throw new SslException();
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamSetTargetHost")]
        private static partial int SSLStreamSetTargetHostImpl(
            SafeSslHandle sslHandle,
            [MarshalAs(UnmanagedType.LPUTF8Str)] string targetHost);

        internal static void SSLStreamSetTargetHost(
            SafeSslHandle sslHandle,
            string targetHost)
        {
            int ret = SSLStreamSetTargetHostImpl(sslHandle, targetHost);
            if (ret == UNSUPPORTED_API_LEVEL)
                throw new PlatformNotSupportedException(SR.net_android_ssl_api_level_unsupported);
            else if (ret != SUCCESS)
                throw new SslException();
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamRequestClientAuthentication")]
        internal static partial void SSLStreamRequestClientAuthentication(SafeSslHandle sslHandle);

        [StructLayout(LayoutKind.Sequential)]
        private unsafe struct ApplicationProtocolData
        {
            public byte* Data;
            public int Length;
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamSetApplicationProtocols")]
        private static unsafe partial int SSLStreamSetApplicationProtocols(SafeSslHandle sslHandle, ApplicationProtocolData[] protocolData, int count);

        internal static unsafe void SSLStreamSetApplicationProtocols(SafeSslHandle sslHandle, List<SslApplicationProtocol> protocols)
        {
            int count = protocols.Count;
            MemoryHandle[] memHandles = new MemoryHandle[count];
            ApplicationProtocolData[] protocolData = new ApplicationProtocolData[count];
            try
            {
                for (int i = 0; i < count; i++)
                {
                    ReadOnlyMemory<byte> protocol = protocols[i].Protocol;
                    memHandles[i] = protocol.Pin();
                    protocolData[i] = new ApplicationProtocolData
                    {
                        Data = (byte*)memHandles[i].Pointer,
                        Length = protocol.Length
                    };
                }

                int ret = SSLStreamSetApplicationProtocols(sslHandle, protocolData, count);
                if (ret != SUCCESS)
                {
                    throw new SslException();
                }
            }
            finally
            {
                foreach (MemoryHandle memHandle in memHandles)
                {
                    memHandle.Dispose();
                }
            }
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamSetEnabledProtocols")]
        private static partial int SSLStreamSetEnabledProtocols(SafeSslHandle sslHandle, ref SslProtocols protocols, int length);

        internal static void SSLStreamSetEnabledProtocols(SafeSslHandle sslHandle, ReadOnlySpan<SslProtocols> protocols)
        {
            int ret = SSLStreamSetEnabledProtocols(sslHandle, ref MemoryMarshal.GetReference(protocols), protocols.Length);
            if (ret != SUCCESS)
                throw new SslException();
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamHandshake")]
        internal static partial PAL_SSLStreamStatus SSLStreamHandshake(SafeSslHandle sslHandle);

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetApplicationProtocol")]
        private static partial int SSLStreamGetApplicationProtocol(SafeSslHandle ssl, byte[]? buf, ref int len);

        internal static byte[]? SSLStreamGetApplicationProtocol(SafeSslHandle ssl)
        {
            int len = 0;
            int ret = SSLStreamGetApplicationProtocol(ssl, null, ref len);
            if (ret != INSUFFICIENT_BUFFER)
                return null;

            byte[] bytes = new byte[len];
            ret = SSLStreamGetApplicationProtocol(ssl, bytes, ref len);
            if (ret != SUCCESS)
                return null;

            return bytes;
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamRead")]
        private static unsafe partial PAL_SSLStreamStatus SSLStreamRead(
            SafeSslHandle sslHandle,
            byte* buffer,
            int length,
            out int bytesRead);

        internal static unsafe PAL_SSLStreamStatus SSLStreamRead(
            SafeSslHandle sslHandle,
            ReadOnlySpan<byte> buffer,
            out int bytesRead)
        {
            fixed (byte* bufferPtr = buffer)
            {
                return SSLStreamRead(sslHandle, bufferPtr, buffer.Length, out bytesRead);
            }
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamWrite")]
        private static unsafe partial PAL_SSLStreamStatus SSLStreamWrite(
            SafeSslHandle sslHandle,
            byte* buffer,
            int length);

        internal static unsafe PAL_SSLStreamStatus SSLStreamWrite(
            SafeSslHandle sslHandle,
            ReadOnlyMemory<byte> buffer)
        {
            using (MemoryHandle memHandle = buffer.Pin())
            {
                return SSLStreamWrite(sslHandle, (byte*)memHandle.Pointer, buffer.Length);
            }
        }

        [LibraryImport(Interop.Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamRelease")]
        internal static partial void SSLStreamRelease(IntPtr ptr);

        internal sealed class SslException : Exception
        {
            internal SslException()
            {
            }

            internal SslException(int errorCode)
            {
                HResult = errorCode;
            }
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetProtocol")]
        private static partial int SSLStreamGetProtocol(SafeSslHandle ssl, out IntPtr protocol);

        internal static string SSLStreamGetProtocol(SafeSslHandle ssl)
        {
            IntPtr protocolPtr;
            int ret = SSLStreamGetProtocol(ssl, out protocolPtr);
            if (ret != SUCCESS)
                throw new SslException();

            if (protocolPtr == IntPtr.Zero)
                return string.Empty;

            string protocol = Marshal.PtrToStringUni(protocolPtr)!;
            Marshal.FreeHGlobal(protocolPtr);
            return protocol;
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetPeerCertificate")]
        internal static partial SafeX509Handle SSLStreamGetPeerCertificate(SafeSslHandle ssl);

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetPeerCertificates")]
        private static partial void SSLStreamGetPeerCertificates(
            SafeSslHandle ssl,
            [MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 2)] out IntPtr[] certs,
            out int count);

        internal static IntPtr[]? SSLStreamGetPeerCertificates(SafeSslHandle ssl)
        {
            IntPtr[]? ptrs;
            Interop.AndroidCrypto.SSLStreamGetPeerCertificates(ssl, out ptrs, out _);
            return ptrs;
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamGetCipherSuite")]
        private static partial int SSLStreamGetCipherSuite(SafeSslHandle ssl, out IntPtr cipherSuite);

        internal static string SSLStreamGetCipherSuite(SafeSslHandle ssl)
        {
            IntPtr cipherSuitePtr;
            int ret = SSLStreamGetCipherSuite(ssl, out cipherSuitePtr);
            if (ret != SUCCESS)
                throw new SslException();

            if (cipherSuitePtr == IntPtr.Zero)
                return string.Empty;

            string cipherSuite = Marshal.PtrToStringUni(cipherSuitePtr)!;
            Marshal.FreeHGlobal(cipherSuitePtr);
            return cipherSuite;
        }

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamShutdown")]
        [return: MarshalAs(UnmanagedType.U1)]
        internal static partial bool SSLStreamShutdown(SafeSslHandle ssl);

        [LibraryImport(Libraries.AndroidCryptoNative, EntryPoint = "AndroidCryptoNative_SSLStreamVerifyHostname")]
        [return: MarshalAs(UnmanagedType.U1)]
        internal static partial bool SSLStreamVerifyHostname(
            SafeSslHandle ssl,
            [MarshalAs(UnmanagedType.LPUTF8Str)] string hostname);
    }
}

namespace System.Net
{
    internal sealed class SafeSslHandle : SafeHandle
    {
        public SafeSslHandle()
            : base(IntPtr.Zero, ownsHandle: true)
        {
        }

        protected override bool ReleaseHandle()
        {
            Interop.AndroidCrypto.SSLStreamRelease(handle);
            SetHandle(IntPtr.Zero);
            return true;
        }

        public override bool IsInvalid => handle == IntPtr.Zero;
    }
}
-1
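Editor's note on the interop shape in the record above: `SSLStreamGetApplicationProtocol` uses the classic two-call native pattern — first call with a null buffer to learn the required length, then call again with a right-sized buffer. The sketch below is a minimal, self-contained C# illustration of that pattern under stated assumptions; the `GetProtocol` stand-in and its status constants are hypothetical and are not part of the Android crypto native library.

```csharp
using System;

class TwoCallPatternDemo
{
    const int SUCCESS = 0;              // assumed status codes, mirroring the
    const int INSUFFICIENT_BUFFER = -1; // SUCCESS/INSUFFICIENT_BUFFER checks above

    // Hypothetical stand-in for a native export: reports the required
    // length when the buffer is missing or too small, fills it otherwise.
    static int GetProtocol(byte[]? buf, ref int len)
    {
        byte[] payload = { (byte)'h', (byte)'2' }; // e.g. the ALPN protocol "h2"
        if (buf is null || buf.Length < payload.Length)
        {
            len = payload.Length;
            return INSUFFICIENT_BUFFER;
        }
        payload.CopyTo(buf, 0);
        len = payload.Length;
        return SUCCESS;
    }

    static void Main()
    {
        int len = 0;
        // First call: learn how large the buffer must be.
        if (GetProtocol(null, ref len) != INSUFFICIENT_BUFFER)
            return; // no protocol negotiated

        // Second call: retrieve the payload into a right-sized buffer.
        byte[] bytes = new byte[len];
        if (GetProtocol(bytes, ref len) == SUCCESS)
            Console.WriteLine(System.Text.Encoding.ASCII.GetString(bytes)); // "h2"
    }
}
```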
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack space that will be passed to the callee.
./src/libraries/System.Runtime.InteropServices/tests/LibraryImportGenerator.UnitTests/LibraryImportGenerator.Unit.Tests.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
    <IsPackable>false</IsPackable>
    <LangVersion>Preview</LangVersion>
    <Nullable>enable</Nullable>
    <TestRunRequiresLiveRefPack>true</TestRunRequiresLiveRefPack>
  </PropertyGroup>

  <ItemGroup>
    <Compile Include="$(CommonTestPath)SourceGenerators\LiveReferencePack.cs" Link="Common\SourceGenerators\LiveReferencePack.cs" />
  </ItemGroup>

  <ItemGroup>
    <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Workspaces" Version="$(MicrosoftCodeAnalysisVersion)" />
    <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Analyzer.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" />
    <PackageReference Include="Microsoft.CodeAnalysis.CSharp.CodeFix.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" />
    <PackageReference Include="coverlet.collector" Version="$(CoverletCollectorVersion)">
      <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
      <PrivateAssets>all</PrivateAssets>
    </PackageReference>
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\Ancillary.Interop\Ancillary.Interop.csproj" />
    <ProjectReference Include="..\..\gen\LibraryImportGenerator\LibraryImportGenerator.csproj" />
  </ItemGroup>

  <ItemGroup>
    <None Include="$(RepoRoot)/NuGet.config" Link="NuGet.config" CopyToOutputDirectory="PreserveNewest" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
    <IsPackable>false</IsPackable>
    <LangVersion>Preview</LangVersion>
    <Nullable>enable</Nullable>
    <TestRunRequiresLiveRefPack>true</TestRunRequiresLiveRefPack>
  </PropertyGroup>

  <ItemGroup>
    <Compile Include="$(CommonTestPath)SourceGenerators\LiveReferencePack.cs" Link="Common\SourceGenerators\LiveReferencePack.cs" />
  </ItemGroup>

  <ItemGroup>
    <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Workspaces" Version="$(MicrosoftCodeAnalysisVersion)" />
    <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Analyzer.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" />
    <PackageReference Include="Microsoft.CodeAnalysis.CSharp.CodeFix.Testing.XUnit" Version="$(CompilerPlatformTestingVersion)" />
    <PackageReference Include="coverlet.collector" Version="$(CoverletCollectorVersion)">
      <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
      <PrivateAssets>all</PrivateAssets>
    </PackageReference>
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\Ancillary.Interop\Ancillary.Interop.csproj" />
    <ProjectReference Include="..\..\gen\LibraryImportGenerator\LibraryImportGenerator.csproj" />
  </ItemGroup>

  <ItemGroup>
    <None Include="$(RepoRoot)/NuGet.config" Link="NuGet.config" CopyToOutputDirectory="PreserveNewest" />
  </ItemGroup>
</Project>
-1
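Editor's note on the "split struct argument" condition in this record's PR description: under the ARM32 AAPCS, the first four 32-bit argument slots live in r0-r3, so a struct that straddles the last register and the stack is passed "split". The sketch below is an illustrative guess at such a call shape, not code from the PR; the register assignment is an assumption about the ABI, and whether the JIT applies a fast tail call is not observable from C# source.

```csharp
using System;
using System.Runtime.CompilerServices;

// 16-byte struct: with three ints already occupying r0-r2, this value would
// begin in r3 and spill onto the stack on ARM32 (assumed AAPCS behavior).
struct Pair
{
    public long A;
    public long B;
}

class TailCallDemo
{
    [MethodImpl(MethodImplOptions.NoInlining)]
    static long Callee(int x, int y, int z, Pair split) => split.A + x + y + z;

    // The call below is in tail position. Per the PR's rules, the ARM32 JIT
    // would decline the *fast* tail call path here because the callee takes
    // a split struct argument; the call still works, just unoptimized.
    static long Caller(int x)
    {
        Pair p = default;
        p.A = x;
        return Callee(x, x, x, p);
    }

    static void Main() => Console.WriteLine(Caller(7)); // prints 28
}
```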
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when it overwrites stack space that will be passed to the callee.
./src/libraries/System.Formats.Cbor/tests/Reader/CborReaderTests.Tag.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.Linq;
using System.Numerics;
using Test.Cryptography;
using Xunit;

namespace System.Formats.Cbor.Tests
{
    public partial class CborReaderTests
    {
        // Data points taken from https://tools.ietf.org/html/rfc7049#appendix-A
        // Additional pairs generated using http://cbor.me/

        [Theory]
        [InlineData(2, 2, "c202")]
        [InlineData(0, "2013-03-21T20:04:00Z", "c074323031332d30332d32315432303a30343a30305a")]
        [InlineData(1, 1363896240, "c11a514b67b0")]
        [InlineData(23, new byte[] { 1, 2, 3, 4 }, "d74401020304")]
        [InlineData(32, "http://www.example.com", "d82076687474703a2f2f7777772e6578616d706c652e636f6d")]
        [InlineData(int.MaxValue, 2, "da7fffffff02")]
        [InlineData(ulong.MaxValue, new object[] { 1, 2 }, "dbffffffffffffffff820102")]
        public static void ReadTag_SingleValue_HappyPath(ulong expectedTag, object expectedValue, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            CborTag tag = reader.ReadTag();
            Assert.Equal(expectedTag, (ulong)tag);

            Helpers.VerifyValue(reader, expectedValue);
            Assert.Equal(CborReaderState.Finished, reader.PeekState());
        }

        [Theory]
        [InlineData(new ulong[] { 1, 2, 3 }, 2, "c1c2c302")]
        [InlineData(new ulong[] { 0, 0, 0 }, "2013-03-21T20:04:00Z", "c0c0c074323031332d30332d32315432303a30343a30305a")]
        [InlineData(new ulong[] { int.MaxValue, ulong.MaxValue }, 1363896240, "da7fffffffdbffffffffffffffff1a514b67b0")]
        [InlineData(new ulong[] { 23, 24, 100 }, new byte[] { 1, 2, 3, 4 }, "d7d818d8644401020304")]
        [InlineData(new ulong[] { 32, 1, 1 }, new object[] { 1, "lorem ipsum" }, "d820c1c182016b6c6f72656d20697073756d")]
        public static void ReadTag_NestedTags_HappyPath(ulong[] expectedTags, object expectedValue, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            foreach (ulong expectedTag in expectedTags)
            {
                Assert.Equal(CborReaderState.Tag, reader.PeekState());
                CborTag tag = reader.ReadTag();
                Assert.Equal(expectedTag, (ulong)tag);
            }

            Helpers.VerifyValue(reader, expectedValue);
            Assert.Equal(CborReaderState.Finished, reader.PeekState());
        }

        [Theory]
        [InlineData("c2")]
        public static void ReadTag_NoSubsequentData_ShouldPeekEndOfData(string hexEncoding)
        {
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            reader.ReadTag();
            Assert.Throws<CborContentException>(() => reader.PeekState());
        }

        [Theory]
        [InlineData("40")] // empty byte string
        [InlineData("60")] // empty text string
        [InlineData("f6")] // null
        [InlineData("80")] // []
        [InlineData("a0")] // {}
        [InlineData("f97e00")] // NaN
        [InlineData("fb3ff199999999999a")] // 1.1
        public static void ReadTag_InvalidTypes_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadTag());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData(2, "c202")]
        [InlineData(0, "c074323031332d30332d32315432303a30343a30305a")]
        [InlineData(1, "c11a514b67b0")]
        [InlineData(23, "d74401020304")]
        [InlineData(32, "d82076687474703a2f2f7777772e6578616d706c652e636f6d")]
        [InlineData(int.MaxValue, "da7fffffff02")]
        [InlineData(ulong.MaxValue, "dbffffffffffffffff820102")]
        public static void PeekTag_SingleValue_HappyPath(ulong expectedTag, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            CborTag tag = reader.PeekTag();
            Assert.Equal(expectedTag, (ulong)tag);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("40")] // empty byte string
        [InlineData("60")] // empty text string
        [InlineData("f6")] // null
        [InlineData("80")] // []
        [InlineData("a0")] // {}
        [InlineData("f97e00")] // NaN
        [InlineData("fb3ff199999999999a")] // 1.1
        public static void PeekTag_InvalidTypes_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.PeekTag());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Fact]
        public static void ReadTag_NestedTagWithMissingPayload_ShouldThrowCborContentException()
        {
            byte[] encoding = "9fc2ff".HexToByteArray();
            var reader = new CborReader(encoding);

            reader.ReadStartArray();
            reader.ReadTag();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.PeekState());
            Assert.Throws<CborContentException>(() => reader.ReadEndArray());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("8201c202")] // definite length array
        [InlineData("9f01c202ff")] // equivalent indefinite-length array
        public static void ReadTag_CallingEndReadArrayPrematurely_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            // encoding is valid CBOR, so should not throw CborContentException
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            reader.ReadStartArray();
            reader.ReadInt64();
            reader.ReadTag();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Equal(CborReaderState.UnsignedInteger, reader.PeekState());
            Assert.Throws<InvalidOperationException>(() => reader.ReadEndArray());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("a102c202")] // definite length map
        [InlineData("bf02c202ff")] // equivalent indefinite-length map
        public static void ReadTag_CallingEndReadMapPrematurely_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            // encoding is valid CBOR, so should not throw CborContentException
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            reader.ReadStartMap();
            reader.ReadInt64();
            reader.ReadTag();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Equal(CborReaderState.UnsignedInteger, reader.PeekState());
            Assert.Throws<InvalidOperationException>(() => reader.ReadEndArray());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("2013-03-21T20:04:00Z", "c074323031332d30332d32315432303a30343a30305a")]
        [InlineData("2020-04-09T14:31:21.3535941+01:00", "c07821323032302d30342d30395431343a33313a32312e333533353934312b30313a3030")]
        [InlineData("2020-04-09T11:41:19.12-08:00", "c0781c323032302d30342d30395431313a34313a31392e31322d30383a3030")]
        [InlineData("2020-04-09T11:41:19.12-08:00", "c07f781c323032302d30342d30395431313a34313a31392e31322d30383a3030ff")] // indefinite-length date string
        public static void ReadDateTimeOffset_SingleValue_HappyPath(string expectedValueString, string hexEncoding)
        {
            DateTimeOffset expectedValue = DateTimeOffset.Parse(expectedValueString);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            DateTimeOffset result = reader.ReadDateTimeOffset();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
            Assert.Equal(expectedValue.Offset, result.Offset);
        }

        [Theory]
        [InlineData("c01a514b67b0")] // string datetime tag with unix time payload
        public static void ReadDateTimeOffset_InvalidTagPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c07330392f30342f323032302031393a35313a3530")] // 0("09/04/2020 19:51:50")
        [InlineData("c06e4c617374204368726973746d6173")] // 0("Last Christmas")
        public static void ReadDateTimeOffset_InvalidDateString_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("01")] // numeric value without tag
        [InlineData("c301")] // non-datetime tag
        public static void ReadDateTimeOffset_InvalidTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c07330392f30342f323032302031393a35313a3530")] // [0("09/04/2020 19:51:50")]
        [InlineData("81c06e4c617374204368726973746d6173")] // [0("Last Christmas")]
        public static void ReadDateTimeOffset_InvalidFormat_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(CborTag.DateTimeString, reader.ReadTag());
        }

        [Fact]
        public static void ReadDateTimeOffset_StrictConformance_OnError_ShouldPreserveReaderState()
        {
            string hexEncoding = "a20101c06001"; // { 1 : 1 , 0("") : 1 } conforming CBOR with invalid date/time schema
            var reader = new CborReader(hexEncoding.HexToByteArray(), CborConformanceMode.Strict);

            reader.ReadStartMap();
            reader.ReadInt32();
            reader.ReadInt32();

            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset()); // throws a format exception due to malformed date/time string

            // the following operation would originally throw a false positive duplicate key error,
            // due to the checkpoint restore logic not properly resetting key uniqueness validation
            reader.SkipValue(disableConformanceModeChecks: false);

            reader.ReadInt32();
            reader.ReadEndMap();
            Assert.Equal(CborReaderState.Finished, reader.PeekState());
        }

        [Theory]
        [InlineData("2013-03-21T20:04:00Z", "c11a514b67b0")]
        [InlineData("2013-03-21T20:04:00.5Z", "c1fb41d452d9ec200000")]
        [InlineData("2020-04-09T13:31:21Z", "c11a5e8f23a9")]
        [InlineData("1970-01-01T00:00:00Z", "c100")]
        [InlineData("1969-12-31T23:59:59Z", "c120")]
        [InlineData("1960-01-01T00:00:00Z", "c13a12cff77f")]
        public static void ReadUnixTimeSeconds_SingleValue_HappyPath(string expectedValueString, string hexEncoding)
        {
            DateTimeOffset expectedValue = DateTimeOffset.Parse(expectedValueString);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            DateTimeOffset result = reader.ReadUnixTimeSeconds();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
            Assert.Equal(TimeSpan.Zero, result.Offset);
        }

        [Theory]
        [InlineData("c174323031332d30332d32315432303a30343a30305a")] // epoch datetime tag with string payload
        public static void ReadUnixTimeSeconds_InvalidTagPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c1f97e00")] // 1(NaN)
        [InlineData("c1f9fc00")] // 1(-Infinity)
        public static void ReadUnixTimeSeconds_InvalidFloatPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("01")] // numeric value without tag
        [InlineData("c301")] // non-datetime tag
        public static void ReadUnixTimeSeconds_InvalidTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c17330392f30342f323032302031393a35313a3530")] // [1("09/04/2020 19:51:50")]
        [InlineData("81c16e4c617374204368726973746d6173")] // [1("Last Christmas")]
        public static void ReadUnixTimeSeconds_InvalidFormat_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(CborTag.UnixTimeSeconds, reader.ReadTag());
        }

        [Theory]
        [InlineData("0", "c240")]
        [InlineData("0", "c24100")]
        [InlineData("1", "c24101")]
        [InlineData("1", "c2420001")] // should recognize leading zeroes in buffer
        [InlineData("-1", "c34100")]
        [InlineData("255", "c241ff")]
        [InlineData("-256", "c341ff")]
        [InlineData("256", "c2420100")]
        [InlineData("-257", "c3420100")]
        [InlineData("9223372036854775807", "c2487fffffffffffffff")]
        [InlineData("-9223372036854775808", "c3487fffffffffffffff")]
        [InlineData("18446744073709551616", "c249010000000000000000")]
        [InlineData("-18446744073709551617", "c349010000000000000000")]
        [InlineData("1", "c25f4101ff")] // indefinite-length buffer
        public static void ReadBigInteger_SingleValue_HappyPath(string expectedValueString, string hexEncoding)
        {
            BigInteger expectedValue = BigInteger.Parse(expectedValueString);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            BigInteger result = reader.ReadBigInteger();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
        }

        [Theory]
        [InlineData("01")]
        [InlineData("c001")]
        public static void ReadBigInteger_InvalidCborTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadBigInteger());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c280")]
        [InlineData("c301")]
        public static void ReadBigInteger_InvalidTagPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadBigInteger());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c280")]
        [InlineData("81c301")]
        public static void ReadBigInteger_InvalidTagPayload_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadBigInteger());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
        }

        [Theory]
        [InlineData("0", "c4820000")]
        [InlineData("1", "c4820001")]
        [InlineData("-1", "c4820020")]
        [InlineData("1.1", "c482200b")]
        [InlineData("1.000", "c482221903e8")]
        [InlineData("273.15", "c48221196ab3")]
        [InlineData("79228162514264337593543950335", "c48200c24cffffffffffffffffffffffff")] // decimal.MaxValue
        [InlineData("7922816251426433759354395033.5", "c48220c24cffffffffffffffffffffffff")]
        [InlineData("-79228162514264337593543950335", "c48200c34cfffffffffffffffffffffffe")] // decimal.MinValue
        [InlineData("3.9614081247908796757769715711", "c482381bc24c7fffffff7fffffff7fffffff")] // maximal number of fractional digits
        [InlineData("2000000000", "c4820902")] // encoding with positive exponent representation in payload (2 * 10^9)
        public static void ReadDecimal_SingleValue_HappyPath(string expectedStringValue, string hexEncoding)
        {
            decimal expectedValue = decimal.Parse(expectedStringValue, Globalization.CultureInfo.InvariantCulture);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            decimal result = reader.ReadDecimal();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
        }

        [Theory]
        [InlineData("c482181d02")] // 2 * 10^29
        [InlineData("c482381c02")] // 2 * 10^-29
        [InlineData("c48201c24cffffffffffffffffffffffff")] // decimal.MaxValue * 10^1
        [InlineData("c48200c24d01000000000000000000000000")] // (decimal.MaxValue + 1) * 10^0
        [InlineData("c48200c34cffffffffffffffffffffffff")] // (decimal.MinValue - 1) * 10^0
        public static void ReadDecimal_LargeValues_ShouldThrowOverflowException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<OverflowException>(() => reader.ReadDecimal());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c201")]
        public static void ReadDecimal_InvalidTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadDecimal());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c401")] // 4(1)
        [InlineData("c480")] // 4([])
        [InlineData("c48101")] // 4([1])
        [InlineData("c4820160")] // 4([1, ""])
        [InlineData("c4826001")] // 4(["", 1])
        public static void ReadDecimal_InvalidFormat_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadDecimal());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c401")] // [4(1)]
        [InlineData("81c480")] // [4([])]
        [InlineData("81c4826001")] // [4(["", 1])]
        // decimal using an invalid biginteger encoding,
        // in this case two nested state rollbacks will take place
        [InlineData("81c48201c260")] // [4([1, 2("")])]
        public static void ReadDecimal_InvalidTagPayload_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadDecimal());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(CborTag.DecimalFraction, reader.ReadTag());
        }

        [Theory]
        [MemberData(nameof(SupportedConformanceTaggedValues))]
        public static void ReadTaggedValue_SupportedConformance_ShouldSucceed(CborConformanceMode mode, object expectedValue, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            Helpers.VerifyValue(reader, expectedValue);
        }

        public static IEnumerable<object[]> SupportedConformanceTaggedValues =>
            from l in new[] { CborConformanceMode.Lax, CborConformanceMode.Strict, CborConformanceMode.Canonical }
            from v in TaggedValues
            select new object[] { l, v.value, v.hexEncoding };

        [Theory]
        [MemberData(nameof(UnsupportedConformanceTaggedValues))]
        public static void ReadTaggedValue_UnsupportedConformance_ShouldThrowCborContentException(CborConformanceMode mode, object expectedValue, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding, mode);

            Assert.Throws<CborContentException>(() => Helpers.VerifyValue(reader, expectedValue));
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        public static IEnumerable<object[]> UnsupportedConformanceTaggedValues =>
            from l in new[] { CborConformanceMode.Ctap2Canonical }
            from v in TaggedValues
            select new object[] { l, v.value, v.hexEncoding };

        [Theory]
        [MemberData(nameof(TaggedValuesSupportedConformance))]
        public static void PeekTag_SupportedConformanceMode_ShouldSucceed(CborConformanceMode mode, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            reader.PeekTag();
        }

        public static IEnumerable<object[]> TaggedValuesSupportedConformance =>
            from l in new[] { CborConformanceMode.Lax, CborConformanceMode.Strict, CborConformanceMode.Canonical }
            from v in TaggedValues
            select new object[] { l, v.hexEncoding };

        [Theory]
        [MemberData(nameof(TaggedValuesUnsupportedConformance))]
        public static void PeekTag_UnsupportedConformanceMode_ShouldThrowCborContentException(CborConformanceMode mode, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            Assert.Throws<CborContentException>(() => reader.PeekTag());
        }

        public static IEnumerable<object[]> TaggedValuesUnsupportedConformance =>
            from l in new[] { CborConformanceMode.Ctap2Canonical }
            from v in TaggedValues
            select new object[] { l, v.hexEncoding };

        [Theory]
        [MemberData(nameof(UnsupportedConformanceInvalidTypes))]
        public static void PeekTag_InvalidType_UnsupportedConformanceMode_ShouldThrowInvalidOperationException(CborConformanceMode mode, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            Assert.Throws<InvalidOperationException>(() => reader.PeekTag());
        }

        public static IEnumerable<object[]> UnsupportedConformanceInvalidTypes =>
            from l in new[] { CborConformanceMode.Ctap2Canonical }
            from e in new[] { "01", "40", "60" }
            select new object[] { l, e };

        private static (object value, string hexEncoding)[] TaggedValues =>
            new (object, string)[]
            {
                (new object[] { CborTag.MimeMessage, 42 }, "d824182a"),
                (42.0m, "c482201901a4"),
                ((BigInteger)1, "c24101"),
                (CborTestHelpers.UnixEpoch, "c0781c313937302d30312d30315430303a30303a30302e303030303030305a"),
            };
    }
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using System.Linq;
using System.Numerics;
using Test.Cryptography;
using Xunit;

namespace System.Formats.Cbor.Tests
{
    public partial class CborReaderTests
    {
        // Data points taken from https://tools.ietf.org/html/rfc7049#appendix-A
        // Additional pairs generated using http://cbor.me/

        [Theory]
        [InlineData(2, 2, "c202")]
        [InlineData(0, "2013-03-21T20:04:00Z", "c074323031332d30332d32315432303a30343a30305a")]
        [InlineData(1, 1363896240, "c11a514b67b0")]
        [InlineData(23, new byte[] { 1, 2, 3, 4 }, "d74401020304")]
        [InlineData(32, "http://www.example.com", "d82076687474703a2f2f7777772e6578616d706c652e636f6d")]
        [InlineData(int.MaxValue, 2, "da7fffffff02")]
        [InlineData(ulong.MaxValue, new object[] { 1, 2 }, "dbffffffffffffffff820102")]
        public static void ReadTag_SingleValue_HappyPath(ulong expectedTag, object expectedValue, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            CborTag tag = reader.ReadTag();
            Assert.Equal(expectedTag, (ulong)tag);

            Helpers.VerifyValue(reader, expectedValue);
            Assert.Equal(CborReaderState.Finished, reader.PeekState());
        }

        [Theory]
        [InlineData(new ulong[] { 1, 2, 3 }, 2, "c1c2c302")]
        [InlineData(new ulong[] { 0, 0, 0 }, "2013-03-21T20:04:00Z", "c0c0c074323031332d30332d32315432303a30343a30305a")]
        [InlineData(new ulong[] { int.MaxValue, ulong.MaxValue }, 1363896240, "da7fffffffdbffffffffffffffff1a514b67b0")]
        [InlineData(new ulong[] { 23, 24, 100 }, new byte[] { 1, 2, 3, 4 }, "d7d818d8644401020304")]
        [InlineData(new ulong[] { 32, 1, 1 }, new object[] { 1, "lorem ipsum" }, "d820c1c182016b6c6f72656d20697073756d")]
        public static void ReadTag_NestedTags_HappyPath(ulong[] expectedTags, object expectedValue, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            foreach (ulong expectedTag in expectedTags)
            {
                Assert.Equal(CborReaderState.Tag, reader.PeekState());
                CborTag tag = reader.ReadTag();
                Assert.Equal(expectedTag, (ulong)tag);
            }

            Helpers.VerifyValue(reader, expectedValue);
            Assert.Equal(CborReaderState.Finished, reader.PeekState());
        }

        [Theory]
        [InlineData("c2")]
        public static void ReadTag_NoSubsequentData_ShouldPeekEndOfData(string hexEncoding)
        {
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            reader.ReadTag();
            Assert.Throws<CborContentException>(() => reader.PeekState());
        }

        [Theory]
        [InlineData("40")] // empty byte string
        [InlineData("60")] // empty text string
        [InlineData("f6")] // null
        [InlineData("80")] // []
        [InlineData("a0")] // {}
        [InlineData("f97e00")] // NaN
        [InlineData("fb3ff199999999999a")] // 1.1
        public static void ReadTag_InvalidTypes_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadTag());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData(2, "c202")]
        [InlineData(0, "c074323031332d30332d32315432303a30343a30305a")]
        [InlineData(1, "c11a514b67b0")]
        [InlineData(23, "d74401020304")]
        [InlineData(32, "d82076687474703a2f2f7777772e6578616d706c652e636f6d")]
        [InlineData(int.MaxValue, "da7fffffff02")]
        [InlineData(ulong.MaxValue, "dbffffffffffffffff820102")]
        public static void PeekTag_SingleValue_HappyPath(ulong expectedTag, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            CborTag tag = reader.PeekTag();
            Assert.Equal(expectedTag, (ulong)tag);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("40")] // empty byte string
        [InlineData("60")] // empty text string
        [InlineData("f6")] // null
        [InlineData("80")] // []
        [InlineData("a0")] // {}
        [InlineData("f97e00")] // NaN
        [InlineData("fb3ff199999999999a")] // 1.1
        public static void PeekTag_InvalidTypes_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.PeekTag());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Fact]
        public static void ReadTag_NestedTagWithMissingPayload_ShouldThrowCborContentException()
        {
            byte[] encoding = "9fc2ff".HexToByteArray();
            var reader = new CborReader(encoding);

            reader.ReadStartArray();
            reader.ReadTag();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.PeekState());
            Assert.Throws<CborContentException>(() => reader.ReadEndArray());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("8201c202")] // definite length array
        [InlineData("9f01c202ff")] // equivalent indefinite-length array
        public static void ReadTag_CallingEndReadArrayPrematurely_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            // encoding is valid CBOR, so should not throw CborContentException
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            reader.ReadStartArray();
            reader.ReadInt64();
            reader.ReadTag();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Equal(CborReaderState.UnsignedInteger, reader.PeekState());
            Assert.Throws<InvalidOperationException>(() => reader.ReadEndArray());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("a102c202")] // definite length map
        [InlineData("bf02c202ff")] // equivalent indefinite-length map
        public static void ReadTag_CallingEndReadMapPrematurely_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            // encoding is valid CBOR, so should not throw CborContentException
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            reader.ReadStartMap();
            reader.ReadInt64();
            reader.ReadTag();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Equal(CborReaderState.UnsignedInteger, reader.PeekState());
            Assert.Throws<InvalidOperationException>(() => reader.ReadEndArray());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("2013-03-21T20:04:00Z", "c074323031332d30332d32315432303a30343a30305a")]
        [InlineData("2020-04-09T14:31:21.3535941+01:00", "c07821323032302d30342d30395431343a33313a32312e333533353934312b30313a3030")]
        [InlineData("2020-04-09T11:41:19.12-08:00", "c0781c323032302d30342d30395431313a34313a31392e31322d30383a3030")]
        [InlineData("2020-04-09T11:41:19.12-08:00", "c07f781c323032302d30342d30395431313a34313a31392e31322d30383a3030ff")] // indefinite-length date string
        public static void ReadDateTimeOffset_SingleValue_HappyPath(string expectedValueString, string hexEncoding)
        {
            DateTimeOffset expectedValue = DateTimeOffset.Parse(expectedValueString);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            DateTimeOffset result = reader.ReadDateTimeOffset();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
            Assert.Equal(expectedValue.Offset, result.Offset);
        }

        [Theory]
        [InlineData("c01a514b67b0")] // string datetime tag with unix time payload
        public static void ReadDateTimeOffset_InvalidTagPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c07330392f30342f323032302031393a35313a3530")] // 0("09/04/2020 19:51:50")
        [InlineData("c06e4c617374204368726973746d6173")] // 0("Last Christmas")
        public static void ReadDateTimeOffset_InvalidDateString_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("01")] // numeric value without tag
        [InlineData("c301")] // non-datetime tag
        public static void ReadDateTimeOffset_InvalidTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c07330392f30342f323032302031393a35313a3530")] // [0("09/04/2020 19:51:50")]
        [InlineData("81c06e4c617374204368726973746d6173")] // [0("Last Christmas")]
        public static void ReadDateTimeOffset_InvalidFormat_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(CborTag.DateTimeString, reader.ReadTag());
        }

        [Fact]
        public static void ReadDateTimeOffset_StrictConformance_OnError_ShouldPreserveReaderState()
        {
            string hexEncoding = "a20101c06001"; // { 1 : 1 , 0("") : 1 } conforming CBOR with invalid date/time schema
            var reader = new CborReader(hexEncoding.HexToByteArray(), CborConformanceMode.Strict);

            reader.ReadStartMap();
            reader.ReadInt32();
            reader.ReadInt32();

            Assert.Throws<CborContentException>(() => reader.ReadDateTimeOffset()); // throws a format exception due to malformed date/time string

            // the following operation would originally throw a false positive duplicate key error,
            // due to the checkpoint restore logic not properly resetting key uniqueness validation
            reader.SkipValue(disableConformanceModeChecks: false);

            reader.ReadInt32();
            reader.ReadEndMap();
            Assert.Equal(CborReaderState.Finished, reader.PeekState());
        }

        [Theory]
        [InlineData("2013-03-21T20:04:00Z", "c11a514b67b0")]
        [InlineData("2013-03-21T20:04:00.5Z", "c1fb41d452d9ec200000")]
        [InlineData("2020-04-09T13:31:21Z", "c11a5e8f23a9")]
        [InlineData("1970-01-01T00:00:00Z", "c100")]
        [InlineData("1969-12-31T23:59:59Z", "c120")]
        [InlineData("1960-01-01T00:00:00Z", "c13a12cff77f")]
        public static void ReadUnixTimeSeconds_SingleValue_HappyPath(string expectedValueString, string hexEncoding)
        {
            DateTimeOffset expectedValue = DateTimeOffset.Parse(expectedValueString);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            DateTimeOffset result = reader.ReadUnixTimeSeconds();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
            Assert.Equal(TimeSpan.Zero, result.Offset);
        }

        [Theory]
        [InlineData("c174323031332d30332d32315432303a30343a30305a")] // epoch datetime tag with string payload
        public static void ReadUnixTimeSeconds_InvalidTagPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c1f97e00")] // 1(NaN)
        [InlineData("c1f9fc00")] // 1(-Infinity)
        public static void ReadUnixTimeSeconds_InvalidFloatPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("01")] // numeric value without tag
        [InlineData("c301")] // non-datetime tag
        public static void ReadUnixTimeSeconds_InvalidTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c17330392f30342f323032302031393a35313a3530")] // [1("09/04/2020 19:51:50")]
        [InlineData("81c16e4c617374204368726973746d6173")] // [1("Last Christmas")]
        public static void ReadUnixTimeSeconds_InvalidFormat_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadUnixTimeSeconds());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(CborTag.UnixTimeSeconds, reader.ReadTag());
        }

        [Theory]
        [InlineData("0", "c240")]
        [InlineData("0", "c24100")]
        [InlineData("1", "c24101")]
        [InlineData("1", "c2420001")] // should recognize leading zeroes in buffer
        [InlineData("-1", "c34100")]
        [InlineData("255", "c241ff")]
        [InlineData("-256", "c341ff")]
        [InlineData("256", "c2420100")]
        [InlineData("-257", "c3420100")]
        [InlineData("9223372036854775807", "c2487fffffffffffffff")]
        [InlineData("-9223372036854775808", "c3487fffffffffffffff")]
        [InlineData("18446744073709551616", "c249010000000000000000")]
        [InlineData("-18446744073709551617", "c349010000000000000000")]
        [InlineData("1", "c25f4101ff")] // indefinite-length buffer
        public static void ReadBigInteger_SingleValue_HappyPath(string expectedValueString, string hexEncoding)
        {
            BigInteger expectedValue = BigInteger.Parse(expectedValueString);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            BigInteger result = reader.ReadBigInteger();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
        }

        [Theory]
        [InlineData("01")]
        [InlineData("c001")]
        public static void ReadBigInteger_InvalidCborTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadBigInteger());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c280")]
        [InlineData("c301")]
        public static void ReadBigInteger_InvalidTagPayload_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadBigInteger());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c280")]
        [InlineData("81c301")]
        public static void ReadBigInteger_InvalidTagPayload_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadBigInteger());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
        }

        [Theory]
        [InlineData("0", "c4820000")]
        [InlineData("1", "c4820001")]
        [InlineData("-1", "c4820020")]
        [InlineData("1.1", "c482200b")]
        [InlineData("1.000", "c482221903e8")]
        [InlineData("273.15", "c48221196ab3")]
        [InlineData("79228162514264337593543950335", "c48200c24cffffffffffffffffffffffff")] // decimal.MaxValue
        [InlineData("7922816251426433759354395033.5", "c48220c24cffffffffffffffffffffffff")]
        [InlineData("-79228162514264337593543950335", "c48200c34cfffffffffffffffffffffffe")] // decimal.MinValue
        [InlineData("3.9614081247908796757769715711", "c482381bc24c7fffffff7fffffff7fffffff")] // maximal number of fractional digits
        [InlineData("2000000000", "c4820902")] // encoding with positive exponent representation in payload (2 * 10^9)
        public static void ReadDecimal_SingleValue_HappyPath(string expectedStringValue, string hexEncoding)
        {
            decimal expectedValue = decimal.Parse(expectedStringValue, Globalization.CultureInfo.InvariantCulture);
            byte[] data = hexEncoding.HexToByteArray();
            var reader = new CborReader(data);

            decimal result = reader.ReadDecimal();

            Assert.Equal(CborReaderState.Finished, reader.PeekState());
            Assert.Equal(expectedValue, result);
        }

        [Theory]
        [InlineData("c482181d02")] // 2 * 10^29
        [InlineData("c482381c02")] // 2 * 10^-29
        [InlineData("c48201c24cffffffffffffffffffffffff")] // decimal.MaxValue * 10^1
        [InlineData("c48200c24d01000000000000000000000000")] // (decimal.MaxValue + 1) * 10^0
        [InlineData("c48200c34cffffffffffffffffffffffff")] // (decimal.MinValue - 1) * 10^0
        public static void ReadDecimal_LargeValues_ShouldThrowOverflowException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<OverflowException>(() => reader.ReadDecimal());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c201")]
        public static void ReadDecimal_InvalidTag_ShouldThrowInvalidOperationException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<InvalidOperationException>(() => reader.ReadDecimal());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("c401")] // 4(1)
        [InlineData("c480")] // 4([])
        [InlineData("c48101")] // 4([1])
        [InlineData("c4820160")] // 4([1, ""])
        [InlineData("c4826001")] // 4(["", 1])
        public static void ReadDecimal_InvalidFormat_ShouldThrowCborContentException(string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding);

            Assert.Throws<CborContentException>(() => reader.ReadDecimal());
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        [Theory]
        [InlineData("81c401")] // [4(1)]
        [InlineData("81c480")] // [4([])]
        [InlineData("81c4826001")] // [4(["", 1])]
        // decimal using an invalid biginteger encoding,
        // in this case two nested state rollbacks will take place
        [InlineData("81c48201c260")] // [4([1, 2("")])]
        public static void ReadDecimal_InvalidTagPayload_ShouldRollbackToInitialState(string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray());
            reader.ReadStartArray();

            int bytesRemaining = reader.BytesRemaining;
            Assert.Throws<CborContentException>(() => reader.ReadDecimal());
            Assert.Equal(bytesRemaining, reader.BytesRemaining);

            Assert.Equal(CborReaderState.Tag, reader.PeekState());
            Assert.Equal(CborTag.DecimalFraction, reader.ReadTag());
        }

        [Theory]
        [MemberData(nameof(SupportedConformanceTaggedValues))]
        public static void ReadTaggedValue_SupportedConformance_ShouldSucceed(CborConformanceMode mode, object expectedValue, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            Helpers.VerifyValue(reader, expectedValue);
        }

        public static IEnumerable<object[]> SupportedConformanceTaggedValues =>
            from l in new[] { CborConformanceMode.Lax, CborConformanceMode.Strict, CborConformanceMode.Canonical }
            from v in TaggedValues
            select new object[] { l, v.value, v.hexEncoding };

        [Theory]
        [MemberData(nameof(UnsupportedConformanceTaggedValues))]
        public static void ReadTaggedValue_UnsupportedConformance_ShouldThrowCborContentException(CborConformanceMode mode, object expectedValue, string hexEncoding)
        {
            byte[] encoding = hexEncoding.HexToByteArray();
            var reader = new CborReader(encoding, mode);

            Assert.Throws<CborContentException>(() => Helpers.VerifyValue(reader, expectedValue));
            Assert.Equal(encoding.Length, reader.BytesRemaining);
        }

        public static IEnumerable<object[]> UnsupportedConformanceTaggedValues =>
            from l in new[] { CborConformanceMode.Ctap2Canonical }
            from v in TaggedValues
            select new object[] { l, v.value, v.hexEncoding };

        [Theory]
        [MemberData(nameof(TaggedValuesSupportedConformance))]
        public static void PeekTag_SupportedConformanceMode_ShouldSucceed(CborConformanceMode mode, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            reader.PeekTag();
        }

        public static IEnumerable<object[]> TaggedValuesSupportedConformance =>
            from l in new[] { CborConformanceMode.Lax, CborConformanceMode.Strict, CborConformanceMode.Canonical }
            from v in TaggedValues
            select new object[] { l, v.hexEncoding };

        [Theory]
        [MemberData(nameof(TaggedValuesUnsupportedConformance))]
        public static void PeekTag_UnsupportedConformanceMode_ShouldThrowCborContentException(CborConformanceMode mode, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            Assert.Throws<CborContentException>(() => reader.PeekTag());
        }

        public static IEnumerable<object[]> TaggedValuesUnsupportedConformance =>
            from l in new[] { CborConformanceMode.Ctap2Canonical }
            from v in TaggedValues
            select new object[] { l, v.hexEncoding };

        [Theory]
        [MemberData(nameof(UnsupportedConformanceInvalidTypes))]
        public static void PeekTag_InvalidType_UnsupportedConformanceMode_ShouldThrowInvalidOperationException(CborConformanceMode mode, string hexEncoding)
        {
            var reader = new CborReader(hexEncoding.HexToByteArray(), mode);
            Assert.Throws<InvalidOperationException>(() => reader.PeekTag());
        }

        public static IEnumerable<object[]> UnsupportedConformanceInvalidTypes =>
            from l in new[] { CborConformanceMode.Ctap2Canonical }
            from e in new[] { "01", "40", "60" }
            select new object[] { l, e };

        private static (object value, string hexEncoding)[] TaggedValues =>
            new (object, string)[]
            {
                (new object[] { CborTag.MimeMessage, 42 }, "d824182a"),
                (42.0m, "c482201901a4"),
                ((BigInteger)1, "c24101"),
                (CborTestHelpers.UnixEpoch, "c0781c313937302d30312d30315430303a30303a30302e303030303030305a"),
            };
    }
}
-1
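The record above exercises the tagged-value readers of System.Formats.Cbor (ReadUnixTimeSeconds, ReadBigInteger, ReadDecimal). For orientation, here is a minimal sketch that round-trips a tag-1 (epoch date/time) value with the same library; the CborWriter side is assumed from the library's public surface and is not part of the test file above:

```csharp
using System;
using System.Formats.Cbor;

class CborUnixTimeSketch
{
    static void Main()
    {
        // Encode 1(42): tag 1 ("epoch-based date/time") followed by the integer 42.
        var writer = new CborWriter();
        writer.WriteUnixTimeSeconds(42); // produces the bytes c1 18 2a
        byte[] encoding = writer.Encode();

        // Decode it back. A non-numeric payload under tag 1 (as in the
        // InvalidTagPayload cases above) would throw CborContentException instead.
        var reader = new CborReader(encoding);
        DateTimeOffset value = reader.ReadUnixTimeSeconds();
        Console.WriteLine(value); // 1970-01-01 00:00:42 +00:00
    }
}
```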
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
./src/tests/JIT/jit64/mcc/interop/mcc_i16.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Runtime.Extensions { auto } .assembly extern xunit.core {} .assembly extern mscorlib { auto } .assembly 'mcc_i16' {} .namespace MCCTest { .class MyClass { .method assembly static pinvokeimpl("native_i1s" as "#1" stdcall) valuetype MCCTest.VType1 Sum(float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32) cil managed preservesig { } .method private valuetype MCCTest.VType1 GetSum() { .maxstack 64 ldc.r4 1 ldc.r4 2 ldc.r4 3 ldc.r4 4 ldc.r4 5 ldc.r4 6 ldc.r4 7 ldc.r4 8 ldc.r4 9 ldc.r4 10 ldc.r4 11 ldc.r4 12 tail. call valuetype MCCTest.VType1 MCCTest.MyClass::Sum(float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32) ret } .method public specialname rtspecialname instance void .ctor() { .maxstack 1 ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } // end of method MyClass::.ctor .method private static int32 Main(string[] args) { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 64 .locals init ( [0] class MCCTest.MyClass me, [1] valuetype MCCTest.VType1 res, [2] int32 rc ) newobj instance void MCCTest.MyClass::.ctor() stloc.s me ldloc.s me call instance valuetype MCCTest.VType1 MCCTest.MyClass::GetSum() stloc.s res // Check Result ldloc.s res ldc.i4 12 call int32 MCCTest.Common::CheckResult(valuetype MCCTest.VType1, int32) stloc.s rc ldloc.s rc ret } // end of method MyClass::Main } // end of class MyClass } // end of namespace MCCTest
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Runtime.Extensions { auto } .assembly extern xunit.core {} .assembly extern mscorlib { auto } .assembly 'mcc_i16' {} .namespace MCCTest { .class MyClass { .method assembly static pinvokeimpl("native_i1s" as "#1" stdcall) valuetype MCCTest.VType1 Sum(float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32) cil managed preservesig { } .method private valuetype MCCTest.VType1 GetSum() { .maxstack 64 ldc.r4 1 ldc.r4 2 ldc.r4 3 ldc.r4 4 ldc.r4 5 ldc.r4 6 ldc.r4 7 ldc.r4 8 ldc.r4 9 ldc.r4 10 ldc.r4 11 ldc.r4 12 tail. call valuetype MCCTest.VType1 MCCTest.MyClass::Sum(float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32, float32) ret } .method public specialname rtspecialname instance void .ctor() { .maxstack 1 ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } // end of method MyClass::.ctor .method private static int32 Main(string[] args) { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 64 .locals init ( [0] class MCCTest.MyClass me, [1] valuetype MCCTest.VType1 res, [2] int32 rc ) newobj instance void MCCTest.MyClass::.ctor() stloc.s me ldloc.s me call instance valuetype MCCTest.VType1 MCCTest.MyClass::GetSum() stloc.s res // Check Result ldloc.s res ldc.i4 12 call int32 MCCTest.Common::CheckResult(valuetype MCCTest.VType1, int32) stloc.s rc ldloc.s rc ret } // end of method MyClass::Main } // end of class MyClass } // end of namespace MCCTest
-1
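The IL above forces a tail call with the explicit tail. prefix; in C# the JIT can apply the same transformation implicitly for calls in tail position. A rough sketch of the two shapes the PR's rules distinguish — illustrative only, since the actual decision is made inside the JIT, not in user code:

```csharp
// 32-byte struct: an argument like this can be split between registers and
// the stack under the ARM32 ABI.
struct Big { public long A, B, C, D; }

static class TailCallShapes
{
    // Tail-position call with register-sized arguments: a candidate for the
    // fast tail call path this PR enables on ARM32.
    static long CountDown(long n, long acc) =>
        n == 0 ? acc : CountDown(n - 1, acc + n);

    // Per the PR's rules, a callee taking a split struct argument is excluded,
    // so this tail-position call stays a regular (or helper-assisted) call.
    static long Sum(Big b) => b.A + b.B + b.C + b.D;
    static long Forward(Big b) => Sum(b);
}
```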
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
./src/libraries/System.Composition.Runtime/src/System/Composition/ExportFactoryOfTTMetadata.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Composition { /// <summary> /// An ExportFactory that provides metadata describing the created exports. /// </summary> /// <typeparam name="T">The contract type being created.</typeparam> /// <typeparam name="TMetadata">The metadata required from the export.</typeparam> public class ExportFactory<T, TMetadata> : ExportFactory<T> { /// <summary> /// Construct an ExportFactory. /// </summary> /// <param name="exportCreator">Action invoked upon calls to the Create() method.</param> /// <param name="metadata">The metadata associated with the export.</param> public ExportFactory(Func<Tuple<T, Action>> exportCreator, TMetadata metadata) : base(exportCreator) { Metadata = metadata; } /// <summary> /// The metadata associated with the export. /// </summary> public TMetadata Metadata { get; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Composition { /// <summary> /// An ExportFactory that provides metadata describing the created exports. /// </summary> /// <typeparam name="T">The contract type being created.</typeparam> /// <typeparam name="TMetadata">The metadata required from the export.</typeparam> public class ExportFactory<T, TMetadata> : ExportFactory<T> { /// <summary> /// Construct an ExportFactory. /// </summary> /// <param name="exportCreator">Action invoked upon calls to the Create() method.</param> /// <param name="metadata">The metadata associated with the export.</param> public ExportFactory(Func<Tuple<T, Action>> exportCreator, TMetadata metadata) : base(exportCreator) { Metadata = metadata; } /// <summary> /// The metadata associated with the export. /// </summary> public TMetadata Metadata { get; } } }
-1
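The record above carries the System.Composition ExportFactory<T, TMetadata> source. A minimal usage sketch follows; the part and metadata names (ILogger, LoggerMetadata) are hypothetical, not from the repository:

```csharp
using System;
using System.Composition;

public interface ILogger { void Log(string message); }           // hypothetical contract
public class LoggerMetadata { public string Name { get; set; } } // hypothetical metadata view

public class Consumer
{
    [Import]
    public ExportFactory<ILogger, LoggerMetadata> LoggerFactory { get; set; }

    public void Use()
    {
        // Metadata is available without instantiating the part...
        Console.WriteLine(LoggerFactory.Metadata.Name);

        // ...and each CreateExport() yields a fresh instance whose lifetime
        // is controlled by disposing the returned Export<T>.
        using (Export<ILogger> export = LoggerFactory.CreateExport())
        {
            export.Value.Log("hello");
        }
    }
}
```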
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
./src/libraries/System.Linq.Parallel/src/System/Linq/Parallel/QueryOperators/Options/OrderingQueryOperator.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ // // OrderingQueryOperator.cs // // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- using System.Collections.Generic; using System.Threading; namespace System.Linq.Parallel { /// <summary> /// Represents operators AsOrdered and AsUnordered. In the current implementation, it /// simply turns on preservation globally in the query. /// </summary> /// <typeparam name="TSource"></typeparam> internal sealed class OrderingQueryOperator<TSource> : QueryOperator<TSource> { private readonly QueryOperator<TSource> _child; private readonly OrdinalIndexState _ordinalIndexState; public OrderingQueryOperator(QueryOperator<TSource> child, bool orderOn) : base(orderOn, child.SpecifiedQuerySettings) { _child = child; _ordinalIndexState = _child.OrdinalIndexState; } internal override QueryResults<TSource> Open(QuerySettings settings, bool preferStriping) { return _child.Open(settings, preferStriping); } internal override IEnumerator<TSource> GetEnumerator(ParallelMergeOptions? mergeOptions, bool suppressOrderPreservation) { if (_child is ScanQueryOperator<TSource> childAsScan) { return childAsScan.Data.GetEnumerator(); } return base.GetEnumerator(mergeOptions, suppressOrderPreservation); } //--------------------------------------------------------------------------------------- // Returns an enumerable that represents the query executing sequentially. // internal override IEnumerable<TSource> AsSequentialQuery(CancellationToken token) { return _child.AsSequentialQuery(token); } //--------------------------------------------------------------------------------------- // Whether this operator performs a premature merge that would not be performed in // a similar sequential operation (i.e., in LINQ to Objects). // internal override bool LimitsParallelism { get { return _child.LimitsParallelism; } } internal override OrdinalIndexState OrdinalIndexState { get { return _ordinalIndexState; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ // // OrderingQueryOperator.cs // // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- using System.Collections.Generic; using System.Threading; namespace System.Linq.Parallel { /// <summary> /// Represents operators AsOrdered and AsUnordered. In the current implementation, it /// simply turns on preservation globally in the query. /// </summary> /// <typeparam name="TSource"></typeparam> internal sealed class OrderingQueryOperator<TSource> : QueryOperator<TSource> { private readonly QueryOperator<TSource> _child; private readonly OrdinalIndexState _ordinalIndexState; public OrderingQueryOperator(QueryOperator<TSource> child, bool orderOn) : base(orderOn, child.SpecifiedQuerySettings) { _child = child; _ordinalIndexState = _child.OrdinalIndexState; } internal override QueryResults<TSource> Open(QuerySettings settings, bool preferStriping) { return _child.Open(settings, preferStriping); } internal override IEnumerator<TSource> GetEnumerator(ParallelMergeOptions? mergeOptions, bool suppressOrderPreservation) { if (_child is ScanQueryOperator<TSource> childAsScan) { return childAsScan.Data.GetEnumerator(); } return base.GetEnumerator(mergeOptions, suppressOrderPreservation); } //--------------------------------------------------------------------------------------- // Returns an enumerable that represents the query executing sequentially. // internal override IEnumerable<TSource> AsSequentialQuery(CancellationToken token) { return _child.AsSequentialQuery(token); } //--------------------------------------------------------------------------------------- // Whether this operator performs a premature merge that would not be performed in // a similar sequential operation (i.e., in LINQ to Objects). // internal override bool LimitsParallelism { get { return _child.LimitsParallelism; } } internal override OrdinalIndexState OrdinalIndexState { get { return _ordinalIndexState; } } } }
-1
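OrderingQueryOperator backs the public AsOrdered/AsUnordered operators named in its summary. A small sketch of that public surface, for reference:

```csharp
using System;
using System.Linq;

class OrderingSketch
{
    static void Main()
    {
        int[] source = Enumerable.Range(0, 100).ToArray();

        // AsOrdered() wraps the query in the operator above with orderOn == true,
        // so results come back in source order despite parallel execution.
        int[] doubled = source.AsParallel().AsOrdered()
                              .Select(x => x * 2)
                              .ToArray(); // 0, 2, 4, ...

        // AsUnordered() relaxes order preservation once it no longer matters,
        // letting later operators merge results in any order.
        var firstTen = source.AsParallel().AsOrdered()
                             .Take(10)
                             .AsUnordered()
                             .Select(x => x * 2);

        Console.WriteLine(string.Join(", ", doubled.Take(5)));
        Console.WriteLine(string.Join(", ", firstTen.OrderBy(x => x)));
    }
}
```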
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack that will be passed to the callee.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/MultiplyBySelectedScalarWideningLowerAndAdd.Vector64.Int32.Vector128.Int32.3.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3() { var test = new SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int32[] inArray2, Int32[] inArray3, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 
= inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector64<Int32> _fld2; public Vector128<Int32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3 testClass) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(_fld1, _fld2, _fld3, 3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) 
fixed (Vector128<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector128((Int32*)(pFld3)), 3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly byte Imm = 3; private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Int32[] _data3 = new Int32[Op3ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector64<Int32> _clsVar2; private static Vector128<Int32> _clsVar3; private Vector128<Int64> _fld1; private Vector64<Int32> _fld2; private Vector128<Int32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, _data3, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr)), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( _clsVar1, _clsVar2, _clsVar3, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector64<Int32>* pClsVar2 = &_clsVar2) fixed (Vector128<Int32>* pClsVar3 = &_clsVar3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector64((Int32*)(pClsVar2)), AdvSimd.LoadVector128((Int32*)(pClsVar3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = 
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3(); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector64<Int32>* pFld2 = &test._fld2) fixed (Vector128<Int32>* pFld3 = &test._fld3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector128((Int32*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(_fld1, _fld2, _fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) fixed (Vector128<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector128((Int32*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(&test._fld1)), AdvSimd.LoadVector64((Int32*)(&test._fld2)), AdvSimd.LoadVector128((Int32*)(&test._fld3)), 3 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector64<Int32> op2, Vector128<Int32> op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(Int64[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyWideningAndAdd(firstOp[i], secondOp[i], thirdOp[Imm]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd)}<Int64>(Vector128<Int64>, Vector64<Int32>, Vector128<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3() { var test = new SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(Int64[] inArray1, Int32[] inArray2, Int32[] inArray3, Int64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>(); int sizeOfinArray2 
= inArray2.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int64> _fld1; public Vector64<Int32> _fld2; public Vector128<Int32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3 testClass) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(_fld1, _fld2, _fld3, 3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3 testClass) { fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) 
fixed (Vector128<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector128((Int32*)(pFld3)), 3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int64>>() / sizeof(Int64); private static readonly byte Imm = 3; private static Int64[] _data1 = new Int64[Op1ElementCount]; private static Int32[] _data2 = new Int32[Op2ElementCount]; private static Int32[] _data3 = new Int32[Op3ElementCount]; private static Vector128<Int64> _clsVar1; private static Vector64<Int32> _clsVar2; private static Vector128<Int32> _clsVar3; private Vector128<Int64> _fld1; private Vector64<Int32> _fld2; private Vector128<Int32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); } public SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld3), ref Unsafe.As<Int32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt32(); } _dataTable = new DataTable(_data1, _data2, _data3, new Int64[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd), new Type[] { typeof(Vector128<Int64>), typeof(Vector64<Int32>), typeof(Vector128<Int32>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr)), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( _clsVar1, _clsVar2, _clsVar3, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int64>* pClsVar1 = &_clsVar1) fixed (Vector64<Int32>* pClsVar2 = &_clsVar2) fixed (Vector128<Int32>* pClsVar3 = &_clsVar3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pClsVar1)), AdvSimd.LoadVector64((Int32*)(pClsVar2)), AdvSimd.LoadVector128((Int32*)(pClsVar3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int64>>(_dataTable.inArray1Ptr); var op2 = 
Unsafe.Read<Vector64<Int32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray3Ptr); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int64*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector64((Int32*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray3Ptr)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3(); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__MultiplyBySelectedScalarWideningLowerAndAdd_Vector64_Int32_Vector128_Int32_3(); fixed (Vector128<Int64>* pFld1 = &test._fld1) fixed (Vector64<Int32>* pFld2 = &test._fld2) fixed (Vector128<Int32>* pFld3 = &test._fld3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector128((Int32*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(_fld1, _fld2, _fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int64>* pFld1 = &_fld1) fixed (Vector64<Int32>* pFld2 = &_fld2) fixed (Vector128<Int32>* pFld3 = &_fld3) { var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(pFld1)), AdvSimd.LoadVector64((Int32*)(pFld2)), AdvSimd.LoadVector128((Int32*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd( AdvSimd.LoadVector128((Int64*)(&test._fld1)), AdvSimd.LoadVector64((Int32*)(&test._fld2)), AdvSimd.LoadVector128((Int32*)(&test._fld3)), 3 ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int64> op1, Vector64<Int32> op2, Vector128<Int32> op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { Int64[] inArray1 = new Int64[Op1ElementCount]; Int32[] inArray2 = new Int32[Op2ElementCount]; Int32[] inArray3 = new Int32[Op3ElementCount]; Int64[] outArray = new Int64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int64>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(Int64[] firstOp, Int32[] secondOp, Int32[] thirdOp, Int64[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.MultiplyWideningAndAdd(firstOp[i], secondOp[i], thirdOp[Imm]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd)}<Int64>(Vector128<Int64>, Vector64<Int32>, Vector128<Int32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
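The validation loop in the record above checks each lower lane against Helpers.MultiplyWideningAndAdd with the selected scalar at index Imm (3 in this test). A minimal scalar sketch of the lane semantics being validated, assuming the helper computes addend + (Int64)left * (Int64)right; ScalarReference and its Main are illustrative names, not part of the test project:

using System;

static class ScalarReference
{
    // Assumed lane semantics: result[i] = addend[i] + (Int64)left[i] * (Int64)right[imm],
    // mirroring MultiplyBySelectedScalarWideningLowerAndAdd with imm == 3.
    static long MultiplyWideningAndAdd(long addend, int left, int selectedScalar)
        => addend + (long)left * selectedScalar;

    static void Main()
    {
        long[] addend = { 1L, 2L };        // lanes of the Vector128<Int64> addend
        int[] left = { 3, 4 };             // lanes of the Vector64<Int32> multiplicand
        int[] right = { 9, 9, 9, 7 };      // lanes of the Vector128<Int32>; lane 3 is selected
        for (int i = 0; i < addend.Length; i++)
            Console.WriteLine(MultiplyWideningAndAdd(addend[i], left[i], right[3]));
        // prints 22 and 30
    }
}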
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
./src/tests/GC/Scenarios/Boxing/variantint.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <GCStressIncompatible>true</GCStressIncompatible> </PropertyGroup> <ItemGroup> <Compile Include="variantint.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <GCStressIncompatible>true</GCStressIncompatible> </PropertyGroup> <ItemGroup> <Compile Include="variantint.cs" /> </ItemGroup> </Project>
-1
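The project above compiles variantint.cs, a GC scenario built around boxing and marked GCStressIncompatible. A hedged sketch of the kind of allocation churn such a boxing test exercises: repeatedly boxing Int32 values into object slots between forced collections. BoxingChurn is a hypothetical stand-in, not the real variantint.cs:

using System;

class BoxingChurn
{
    static void Main()
    {
        object[] slots = new object[1000];
        for (int pass = 0; pass < 100; pass++)
        {
            for (int i = 0; i < slots.Length; i++)
                slots[i] = i + pass;   // each assignment boxes an Int32 on the heap
            GC.Collect();              // provoke collections between passes
        }
        Console.WriteLine("done");
    }
}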
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
./src/tests/JIT/jit64/hfa/main/testE/hfa_sf1E_d.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. --> <DebugType>full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="hfa_testE.cs" /> <ProjectReference Include="..\dll\common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f32_managed.csproj" /> <CMakeProjectReference Include="..\dll\CMakeLists.txt" /> <ProjectReference Include="..\dll\hfa_simple_f32_common.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. --> <DebugType>full</DebugType> <Optimize>False</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="hfa_testE.cs" /> <ProjectReference Include="..\dll\common.csproj" /> <ProjectReference Include="..\dll\hfa_simple_f32_managed.csproj" /> <CMakeProjectReference Include="..\dll\CMakeLists.txt" /> <ProjectReference Include="..\dll\hfa_simple_f32_common.csproj" /> </ItemGroup> </Project>
-1
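hfa_testE.cs in the record above exercises homogeneous floating-point aggregates (HFAs): structs whose fields are all the same floating-point type, which ARM calling conventions can pass in FP registers rather than on the stack. A hedged sketch of such a struct; Hfa3 and HfaDemo are hypothetical types, not taken from the hfa_simple_f32 libraries:

using System;

struct Hfa3
{
    public float F1, F2, F3;   // three fields of a single FP type => an HFA on ARM
    public float Sum() => F1 + F2 + F3;
}

class HfaDemo
{
    // An HFA argument like this can travel in s0-s2 on ARM32 instead of stack slots.
    static float Consume(Hfa3 h) => h.Sum();

    static void Main()
    {
        var h = new Hfa3 { F1 = 1f, F2 = 2f, F3 = 3f };
        Console.WriteLine(Consume(h));   // 6
    }
}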
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
./src/libraries/System.Configuration.ConfigurationManager/src/System/Configuration/ClientSettingsStore.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Configuration.Internal; using System.Diagnostics; using System.IO; using System.Xml; namespace System.Configuration { /// <summary> /// This class abstracts the details of config system away from the LocalFileSettingsProvider. It talks to /// the configuration API and the relevant Sections to read and write settings. /// It understands sections of type ClientSettingsSection. /// /// NOTE: This API supports reading from app.exe.config and user.config, but writing only to /// user.config. /// </summary> internal sealed class ClientSettingsStore { private const string ApplicationSettingsGroupName = "applicationSettings"; private const string UserSettingsGroupName = "userSettings"; private const string ApplicationSettingsGroupPrefix = ApplicationSettingsGroupName + "/"; private const string UserSettingsGroupPrefix = UserSettingsGroupName + "/"; private Configuration GetUserConfig(bool isRoaming) { ConfigurationUserLevel userLevel = isRoaming ? ConfigurationUserLevel.PerUserRoaming : ConfigurationUserLevel.PerUserRoamingAndLocal; return ClientSettingsConfigurationHost.OpenExeConfiguration(userLevel); } private ClientSettingsSection GetConfigSection(Configuration config, string sectionName, bool declare) { string fullSectionName = UserSettingsGroupPrefix + sectionName; ClientSettingsSection section = null; if (config != null) { section = config.GetSection(fullSectionName) as ClientSettingsSection; if (section == null && declare) { // Looks like the section isn't declared - let's declare it and try again. DeclareSection(config, sectionName); section = config.GetSection(fullSectionName) as ClientSettingsSection; } } return section; } // Declares the section handler of a given section in its section group, if a declaration isn't already // present. private void DeclareSection(Configuration config, string sectionName) { ConfigurationSectionGroup settingsGroup = config.GetSectionGroup(UserSettingsGroupName); if (settingsGroup == null) { //Declare settings group ConfigurationSectionGroup group = new UserSettingsGroup(); config.SectionGroups.Add(UserSettingsGroupName, group); } settingsGroup = config.GetSectionGroup(UserSettingsGroupName); Debug.Assert(settingsGroup != null, "Failed to declare settings group"); if (settingsGroup != null) { ConfigurationSection section = settingsGroup.Sections[sectionName]; if (section == null) { section = new ClientSettingsSection(); section.SectionInformation.AllowExeDefinition = ConfigurationAllowExeDefinition.MachineToLocalUser; section.SectionInformation.RequirePermission = false; settingsGroup.Sections.Add(sectionName, section); } } } internal IDictionary ReadSettings(string sectionName, bool isUserScoped) { IDictionary settings = new Hashtable(); if (isUserScoped && !ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { return settings; } string prefix = isUserScoped ? 
UserSettingsGroupPrefix : ApplicationSettingsGroupPrefix; ConfigurationManager.RefreshSection(prefix + sectionName); ClientSettingsSection section = ConfigurationManager.GetSection(prefix + sectionName) as ClientSettingsSection; if (section != null) { foreach (SettingElement setting in section.Settings) { settings[setting.Name] = new StoredSetting(setting.SerializeAs, setting.Value.ValueXml); } } return settings; } internal static IDictionary ReadSettingsFromFile(string configFileName, string sectionName, bool isUserScoped) { IDictionary settings = new Hashtable(); if (isUserScoped && !ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { return settings; } string prefix = isUserScoped ? UserSettingsGroupPrefix : ApplicationSettingsGroupPrefix; ExeConfigurationFileMap fileMap = new ExeConfigurationFileMap(); // NOTE: When isUserScoped is true, we don't care if configFileName represents a roaming file or // a local one. All we want is three levels of configuration. So, we use the PerUserRoaming level. ConfigurationUserLevel userLevel = isUserScoped ? ConfigurationUserLevel.PerUserRoaming : ConfigurationUserLevel.None; if (isUserScoped) { fileMap.ExeConfigFilename = ConfigurationManagerInternalFactory.Instance.ApplicationConfigUri; fileMap.RoamingUserConfigFilename = configFileName; } else { fileMap.ExeConfigFilename = configFileName; } Configuration config = ConfigurationManager.OpenMappedExeConfiguration(fileMap, userLevel); ClientSettingsSection section = config.GetSection(prefix + sectionName) as ClientSettingsSection; if (section != null) { foreach (SettingElement setting in section.Settings) { settings[setting.Name] = new StoredSetting(setting.SerializeAs, setting.Value.ValueXml); } } return settings; } internal ConnectionStringSettingsCollection ReadConnectionStrings() { return PrivilegedConfigurationManager.ConnectionStrings; } internal void RevertToParent(string sectionName, bool isRoaming) { if (!ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { throw new ConfigurationErrorsException(SR.UserSettingsNotSupported); } Configuration config = GetUserConfig(isRoaming); ClientSettingsSection section = GetConfigSection(config, sectionName, false); // If the section is null, there is nothing to revert. if (section != null) { section.SectionInformation.RevertToParent(); config.Save(); } } internal void WriteSettings(string sectionName, bool isRoaming, IDictionary newSettings) { if (!ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { throw new ConfigurationErrorsException(SR.UserSettingsNotSupported); } Configuration config = GetUserConfig(isRoaming); ClientSettingsSection section = GetConfigSection(config, sectionName, true); if (section != null) { SettingElementCollection sec = section.Settings; foreach (DictionaryEntry entry in newSettings) { SettingElement se = sec.Get((string)entry.Key); if (se == null) { se = new SettingElement(); se.Name = (string)entry.Key; sec.Add(se); } StoredSetting ss = (StoredSetting)entry.Value; se.SerializeAs = ss.SerializeAs; se.Value.ValueXml = ss.Value; } try { config.Save(); } catch (ConfigurationErrorsException ex) { // We wrap this in an exception with our error message and throw again. throw new ConfigurationErrorsException(SR.Format(SR.SettingsSaveFailed, ex.Message), ex); } } else { throw new ConfigurationErrorsException(SR.SettingsSaveFailedNoSection); } } /// <summary> /// A private configuration host that we use to write settings to config. 
We need this so we /// can enforce a quota on the size of stuff written out. /// </summary> private sealed class ClientSettingsConfigurationHost : DelegatingConfigHost { private const string ClientConfigurationHostTypeName = "System.Configuration.ClientConfigurationHost, " + TypeUtil.ConfigurationManagerAssemblyName; private const string InternalConfigConfigurationFactoryTypeName = "System.Configuration.Internal.InternalConfigConfigurationFactory, " + TypeUtil.ConfigurationManagerAssemblyName; private static volatile IInternalConfigConfigurationFactory s_configFactory; /// <summary> /// ClientConfigurationHost implements this - a way of getting some info from it without /// depending too much on its internals. /// </summary> private IInternalConfigClientHost ClientHost { get { return (IInternalConfigClientHost)Host; } } internal static IInternalConfigConfigurationFactory ConfigFactory { get { if (s_configFactory == null) { s_configFactory = TypeUtil.CreateInstance<IInternalConfigConfigurationFactory>(InternalConfigConfigurationFactoryTypeName); } return s_configFactory; } } private ClientSettingsConfigurationHost() { } public override void Init(IInternalConfigRoot configRoot, params object[] hostInitParams) { Debug.Fail("Did not expect to get called here"); } /// <summary> /// We delegate this to the ClientConfigurationHost. The only thing we need to do here is to /// build a configPath from the ConfigurationUserLevel we get passed in. /// </summary> public override void InitForConfiguration(ref string locationSubPath, out string configPath, out string locationConfigPath, IInternalConfigRoot configRoot, params object[] hostInitConfigurationParams) { ConfigurationUserLevel userLevel = (ConfigurationUserLevel)hostInitConfigurationParams[0]; Host = TypeUtil.CreateInstance<IInternalConfigHost>(ClientConfigurationHostTypeName); string desiredConfigPath = userLevel switch { ConfigurationUserLevel.None => ClientHost.GetExeConfigPath(), ConfigurationUserLevel.PerUserRoaming => ClientHost.GetRoamingUserConfigPath(), ConfigurationUserLevel.PerUserRoamingAndLocal => ClientHost.GetLocalUserConfigPath(), _ => throw new ArgumentException(SR.UnknownUserLevel), }; Host.InitForConfiguration(ref locationSubPath, out configPath, out locationConfigPath, configRoot, null, null, desiredConfigPath); } private bool IsKnownConfigFile(string filename) { return string.Equals(filename, ConfigurationManagerInternalFactory.Instance.MachineConfigPath, StringComparison.OrdinalIgnoreCase) || string.Equals(filename, ConfigurationManagerInternalFactory.Instance.ApplicationConfigUri, StringComparison.OrdinalIgnoreCase) || string.Equals(filename, ConfigurationManagerInternalFactory.Instance.ExeLocalConfigPath, StringComparison.OrdinalIgnoreCase) || string.Equals(filename, ConfigurationManagerInternalFactory.Instance.ExeRoamingConfigPath, StringComparison.OrdinalIgnoreCase); } internal static Configuration OpenExeConfiguration(ConfigurationUserLevel userLevel) { return ConfigFactory.Create(typeof(ClientSettingsConfigurationHost), userLevel); } /// <summary> /// If the stream we are asked for represents a config file that we know about, we ask /// the host to assert appropriate permissions. 
/// </summary> public override Stream OpenStreamForRead(string streamName) { if (IsKnownConfigFile(streamName)) { return Host.OpenStreamForRead(streamName, true); } else { return Host.OpenStreamForRead(streamName); } } public override Stream OpenStreamForWrite(string streamName, string templateStreamName, ref object writeContext) { // On .NET Framework we do a bunch of work here around ensuring permissions and quotas return Host.OpenStreamForWrite(streamName, templateStreamName, ref writeContext); } /// <summary> /// If this is a stream that represents a user.config file that we know about, we ask /// the host to assert appropriate permissions. /// </summary> public override void WriteCompleted(string streamName, bool success, object writeContext) { if (string.Equals(streamName, ConfigurationManagerInternalFactory.Instance.ExeLocalConfigPath, StringComparison.OrdinalIgnoreCase) || string.Equals(streamName, ConfigurationManagerInternalFactory.Instance.ExeRoamingConfigPath, StringComparison.OrdinalIgnoreCase)) { Host.WriteCompleted(streamName, success, writeContext, true); } else { Host.WriteCompleted(streamName, success, writeContext); } } } } /// <summary> /// The ClientSettingsStore talks to the LocalFileSettingsProvider through a dictionary which maps from /// setting names to StoredSetting structs. This struct contains the relevant information. /// </summary> internal struct StoredSetting { internal StoredSetting(SettingsSerializeAs serializeAs, XmlNode value) { SerializeAs = serializeAs; Value = value; } internal SettingsSerializeAs SerializeAs; internal XmlNode Value; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Configuration.Internal; using System.Diagnostics; using System.IO; using System.Xml; namespace System.Configuration { /// <summary> /// This class abstracts the details of config system away from the LocalFileSettingsProvider. It talks to /// the configuration API and the relevant Sections to read and write settings. /// It understands sections of type ClientSettingsSection. /// /// NOTE: This API supports reading from app.exe.config and user.config, but writing only to /// user.config. /// </summary> internal sealed class ClientSettingsStore { private const string ApplicationSettingsGroupName = "applicationSettings"; private const string UserSettingsGroupName = "userSettings"; private const string ApplicationSettingsGroupPrefix = ApplicationSettingsGroupName + "/"; private const string UserSettingsGroupPrefix = UserSettingsGroupName + "/"; private Configuration GetUserConfig(bool isRoaming) { ConfigurationUserLevel userLevel = isRoaming ? ConfigurationUserLevel.PerUserRoaming : ConfigurationUserLevel.PerUserRoamingAndLocal; return ClientSettingsConfigurationHost.OpenExeConfiguration(userLevel); } private ClientSettingsSection GetConfigSection(Configuration config, string sectionName, bool declare) { string fullSectionName = UserSettingsGroupPrefix + sectionName; ClientSettingsSection section = null; if (config != null) { section = config.GetSection(fullSectionName) as ClientSettingsSection; if (section == null && declare) { // Looks like the section isn't declared - let's declare it and try again. DeclareSection(config, sectionName); section = config.GetSection(fullSectionName) as ClientSettingsSection; } } return section; } // Declares the section handler of a given section in its section group, if a declaration isn't already // present. private void DeclareSection(Configuration config, string sectionName) { ConfigurationSectionGroup settingsGroup = config.GetSectionGroup(UserSettingsGroupName); if (settingsGroup == null) { //Declare settings group ConfigurationSectionGroup group = new UserSettingsGroup(); config.SectionGroups.Add(UserSettingsGroupName, group); } settingsGroup = config.GetSectionGroup(UserSettingsGroupName); Debug.Assert(settingsGroup != null, "Failed to declare settings group"); if (settingsGroup != null) { ConfigurationSection section = settingsGroup.Sections[sectionName]; if (section == null) { section = new ClientSettingsSection(); section.SectionInformation.AllowExeDefinition = ConfigurationAllowExeDefinition.MachineToLocalUser; section.SectionInformation.RequirePermission = false; settingsGroup.Sections.Add(sectionName, section); } } } internal IDictionary ReadSettings(string sectionName, bool isUserScoped) { IDictionary settings = new Hashtable(); if (isUserScoped && !ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { return settings; } string prefix = isUserScoped ? 
UserSettingsGroupPrefix : ApplicationSettingsGroupPrefix; ConfigurationManager.RefreshSection(prefix + sectionName); ClientSettingsSection section = ConfigurationManager.GetSection(prefix + sectionName) as ClientSettingsSection; if (section != null) { foreach (SettingElement setting in section.Settings) { settings[setting.Name] = new StoredSetting(setting.SerializeAs, setting.Value.ValueXml); } } return settings; } internal static IDictionary ReadSettingsFromFile(string configFileName, string sectionName, bool isUserScoped) { IDictionary settings = new Hashtable(); if (isUserScoped && !ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { return settings; } string prefix = isUserScoped ? UserSettingsGroupPrefix : ApplicationSettingsGroupPrefix; ExeConfigurationFileMap fileMap = new ExeConfigurationFileMap(); // NOTE: When isUserScoped is true, we don't care if configFileName represents a roaming file or // a local one. All we want is three levels of configuration. So, we use the PerUserRoaming level. ConfigurationUserLevel userLevel = isUserScoped ? ConfigurationUserLevel.PerUserRoaming : ConfigurationUserLevel.None; if (isUserScoped) { fileMap.ExeConfigFilename = ConfigurationManagerInternalFactory.Instance.ApplicationConfigUri; fileMap.RoamingUserConfigFilename = configFileName; } else { fileMap.ExeConfigFilename = configFileName; } Configuration config = ConfigurationManager.OpenMappedExeConfiguration(fileMap, userLevel); ClientSettingsSection section = config.GetSection(prefix + sectionName) as ClientSettingsSection; if (section != null) { foreach (SettingElement setting in section.Settings) { settings[setting.Name] = new StoredSetting(setting.SerializeAs, setting.Value.ValueXml); } } return settings; } internal ConnectionStringSettingsCollection ReadConnectionStrings() { return PrivilegedConfigurationManager.ConnectionStrings; } internal void RevertToParent(string sectionName, bool isRoaming) { if (!ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { throw new ConfigurationErrorsException(SR.UserSettingsNotSupported); } Configuration config = GetUserConfig(isRoaming); ClientSettingsSection section = GetConfigSection(config, sectionName, false); // If the section is null, there is nothing to revert. if (section != null) { section.SectionInformation.RevertToParent(); config.Save(); } } internal void WriteSettings(string sectionName, bool isRoaming, IDictionary newSettings) { if (!ConfigurationManagerInternalFactory.Instance.SupportsUserConfig) { throw new ConfigurationErrorsException(SR.UserSettingsNotSupported); } Configuration config = GetUserConfig(isRoaming); ClientSettingsSection section = GetConfigSection(config, sectionName, true); if (section != null) { SettingElementCollection sec = section.Settings; foreach (DictionaryEntry entry in newSettings) { SettingElement se = sec.Get((string)entry.Key); if (se == null) { se = new SettingElement(); se.Name = (string)entry.Key; sec.Add(se); } StoredSetting ss = (StoredSetting)entry.Value; se.SerializeAs = ss.SerializeAs; se.Value.ValueXml = ss.Value; } try { config.Save(); } catch (ConfigurationErrorsException ex) { // We wrap this in an exception with our error message and throw again. throw new ConfigurationErrorsException(SR.Format(SR.SettingsSaveFailed, ex.Message), ex); } } else { throw new ConfigurationErrorsException(SR.SettingsSaveFailedNoSection); } } /// <summary> /// A private configuration host that we use to write settings to config. 
We need this so we /// can enforce a quota on the size of stuff written out. /// </summary> private sealed class ClientSettingsConfigurationHost : DelegatingConfigHost { private const string ClientConfigurationHostTypeName = "System.Configuration.ClientConfigurationHost, " + TypeUtil.ConfigurationManagerAssemblyName; private const string InternalConfigConfigurationFactoryTypeName = "System.Configuration.Internal.InternalConfigConfigurationFactory, " + TypeUtil.ConfigurationManagerAssemblyName; private static volatile IInternalConfigConfigurationFactory s_configFactory; /// <summary> /// ClientConfigurationHost implements this - a way of getting some info from it without /// depending too much on its internals. /// </summary> private IInternalConfigClientHost ClientHost { get { return (IInternalConfigClientHost)Host; } } internal static IInternalConfigConfigurationFactory ConfigFactory { get { if (s_configFactory == null) { s_configFactory = TypeUtil.CreateInstance<IInternalConfigConfigurationFactory>(InternalConfigConfigurationFactoryTypeName); } return s_configFactory; } } private ClientSettingsConfigurationHost() { } public override void Init(IInternalConfigRoot configRoot, params object[] hostInitParams) { Debug.Fail("Did not expect to get called here"); } /// <summary> /// We delegate this to the ClientConfigurationHost. The only thing we need to do here is to /// build a configPath from the ConfigurationUserLevel we get passed in. /// </summary> public override void InitForConfiguration(ref string locationSubPath, out string configPath, out string locationConfigPath, IInternalConfigRoot configRoot, params object[] hostInitConfigurationParams) { ConfigurationUserLevel userLevel = (ConfigurationUserLevel)hostInitConfigurationParams[0]; Host = TypeUtil.CreateInstance<IInternalConfigHost>(ClientConfigurationHostTypeName); string desiredConfigPath = userLevel switch { ConfigurationUserLevel.None => ClientHost.GetExeConfigPath(), ConfigurationUserLevel.PerUserRoaming => ClientHost.GetRoamingUserConfigPath(), ConfigurationUserLevel.PerUserRoamingAndLocal => ClientHost.GetLocalUserConfigPath(), _ => throw new ArgumentException(SR.UnknownUserLevel), }; Host.InitForConfiguration(ref locationSubPath, out configPath, out locationConfigPath, configRoot, null, null, desiredConfigPath); } private bool IsKnownConfigFile(string filename) { return string.Equals(filename, ConfigurationManagerInternalFactory.Instance.MachineConfigPath, StringComparison.OrdinalIgnoreCase) || string.Equals(filename, ConfigurationManagerInternalFactory.Instance.ApplicationConfigUri, StringComparison.OrdinalIgnoreCase) || string.Equals(filename, ConfigurationManagerInternalFactory.Instance.ExeLocalConfigPath, StringComparison.OrdinalIgnoreCase) || string.Equals(filename, ConfigurationManagerInternalFactory.Instance.ExeRoamingConfigPath, StringComparison.OrdinalIgnoreCase); } internal static Configuration OpenExeConfiguration(ConfigurationUserLevel userLevel) { return ConfigFactory.Create(typeof(ClientSettingsConfigurationHost), userLevel); } /// <summary> /// If the stream we are asked for represents a config file that we know about, we ask /// the host to assert appropriate permissions. 
/// </summary> public override Stream OpenStreamForRead(string streamName) { if (IsKnownConfigFile(streamName)) { return Host.OpenStreamForRead(streamName, true); } else { return Host.OpenStreamForRead(streamName); } } public override Stream OpenStreamForWrite(string streamName, string templateStreamName, ref object writeContext) { // On .NET Framework we do a bunch of work here around ensuring permissions and quotas return Host.OpenStreamForWrite(streamName, templateStreamName, ref writeContext); } /// <summary> /// If this is a stream that represents a user.config file that we know about, we ask /// the host to assert appropriate permissions. /// </summary> public override void WriteCompleted(string streamName, bool success, object writeContext) { if (string.Equals(streamName, ConfigurationManagerInternalFactory.Instance.ExeLocalConfigPath, StringComparison.OrdinalIgnoreCase) || string.Equals(streamName, ConfigurationManagerInternalFactory.Instance.ExeRoamingConfigPath, StringComparison.OrdinalIgnoreCase)) { Host.WriteCompleted(streamName, success, writeContext, true); } else { Host.WriteCompleted(streamName, success, writeContext); } } } } /// <summary> /// The ClientSettingsStore talks to the LocalFileSettingsProvider through a dictionary which maps from /// setting names to StoredSetting structs. This struct contains the relevant information. /// </summary> internal struct StoredSetting { internal StoredSetting(SettingsSerializeAs serializeAs, XmlNode value) { SerializeAs = serializeAs; Value = value; } internal SettingsSerializeAs SerializeAs; internal XmlNode Value; } }
-1
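ReadSettings in the file above refreshes a userSettings section and copies each SettingElement into a dictionary. A minimal usage sketch of that same read path through ConfigurationManager, using the public types shown in the file; the section name MyApp.Properties.Settings is illustrative only, and the program assumes a reference to the System.Configuration.ConfigurationManager package:

using System;
using System.Configuration;

class ReadSettingsDemo
{
    static void Main()
    {
        // Same "userSettings/" prefix convention the store uses internally.
        const string path = "userSettings/MyApp.Properties.Settings";
        ConfigurationManager.RefreshSection(path);
        if (ConfigurationManager.GetSection(path) is ClientSettingsSection section)
        {
            foreach (SettingElement setting in section.Settings)
                Console.WriteLine($"{setting.Name} ({setting.SerializeAs}): {setting.Value.ValueXml?.InnerXml}");
        }
    }
}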
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use fast tail call when the callee uses a split struct argument - Do not use fast tail call when the callee uses a non-standard calling convention - Do not use fast tail call when the call overwrites stack space that will be passed to the callee.
./src/libraries/System.Configuration.ConfigurationManager/tests/Mono/ConfigurationSaveTest.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // ConfigurationSaveTest.cs // // Author: // Martin Baulig <[email protected]> // // Copyright (c) 2012 Xamarin Inc. (http://www.xamarin.com) // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. using System; using System.IO; using System.Xml; using System.Xml.Schema; using System.Xml.XPath; using System.Text; using System.Reflection; using System.Globalization; using System.Configuration; using System.Collections.Generic; using SysConfig = System.Configuration.Configuration; using Xunit; namespace MonoTests.System.Configuration { using Util; public class ConfigurationSaveTest { #region Test Framework public abstract class ConfigProvider { public void Create(string filename) { if (File.Exists(filename)) File.Delete(filename); var settings = new XmlWriterSettings(); settings.Indent = true; using (var writer = XmlTextWriter.Create(filename, settings)) { writer.WriteStartElement("configuration"); WriteXml(writer); writer.WriteEndElement(); } } public abstract UserLevel Level { get; } public enum UserLevel { MachineAndExe, RoamingAndExe } public virtual SysConfig OpenConfig(string parentFile, string configFile) { ConfigurationUserLevel level; var map = new ExeConfigurationFileMap(); switch (Level) { case UserLevel.MachineAndExe: map.ExeConfigFilename = configFile; map.MachineConfigFilename = parentFile; level = ConfigurationUserLevel.None; break; case UserLevel.RoamingAndExe: map.RoamingUserConfigFilename = configFile; map.ExeConfigFilename = parentFile; level = ConfigurationUserLevel.PerUserRoaming; break; default: throw new InvalidOperationException(); } return ConfigurationManager.OpenMappedExeConfiguration(map, level); } protected abstract void WriteXml(XmlWriter writer); } public abstract class MachineConfigProvider : ConfigProvider { protected override void WriteXml(XmlWriter writer) { writer.WriteStartElement("configSections"); WriteSections(writer); writer.WriteEndElement(); WriteValues(writer); } public override UserLevel Level { get { return UserLevel.MachineAndExe; } } protected abstract void WriteSections(XmlWriter writer); protected abstract void WriteValues(XmlWriter writer); } class DefaultMachineConfig : MachineConfigProvider { protected override void WriteSections(XmlWriter writer) { writer.WriteStartElement("section"); writer.WriteAttributeString("name", "my"); writer.WriteAttributeString("type", 
typeof(MySection).AssemblyQualifiedName); writer.WriteAttributeString("allowLocation", "true"); writer.WriteAttributeString("allowDefinition", "Everywhere"); writer.WriteAttributeString("allowExeDefinition", "MachineToRoamingUser"); writer.WriteAttributeString("restartOnExternalChanges", "true"); writer.WriteAttributeString("requirePermission", "true"); writer.WriteEndElement(); } internal static void WriteConfigSections(XmlWriter writer) { var provider = new DefaultMachineConfig(); writer.WriteStartElement("configSections"); provider.WriteSections(writer); writer.WriteEndElement(); } protected override void WriteValues(XmlWriter writer) { writer.WriteStartElement("my"); writer.WriteEndElement(); } } class DefaultMachineConfig2 : MachineConfigProvider { protected override void WriteSections(XmlWriter writer) { writer.WriteStartElement("section"); writer.WriteAttributeString("name", "my2"); writer.WriteAttributeString("type", typeof(MySection2).AssemblyQualifiedName); writer.WriteAttributeString("allowLocation", "true"); writer.WriteAttributeString("allowDefinition", "Everywhere"); writer.WriteAttributeString("allowExeDefinition", "MachineToRoamingUser"); writer.WriteAttributeString("restartOnExternalChanges", "true"); writer.WriteAttributeString("requirePermission", "true"); writer.WriteEndElement(); } internal static void WriteConfigSections(XmlWriter writer) { var provider = new DefaultMachineConfig2(); writer.WriteStartElement("configSections"); provider.WriteSections(writer); writer.WriteEndElement(); } protected override void WriteValues(XmlWriter writer) { } } abstract class ParentProvider : ConfigProvider { protected override void WriteXml(XmlWriter writer) { DefaultMachineConfig.WriteConfigSections(writer); writer.WriteStartElement("my"); writer.WriteStartElement("test"); writer.WriteAttributeString("Hello", "29"); writer.WriteEndElement(); writer.WriteEndElement(); } } class RoamingAndExe : ParentProvider { public override UserLevel Level { get { return UserLevel.RoamingAndExe; } } } private delegate void TestFunction(SysConfig config, TestLabel label); private delegate void XmlCheckFunction(XPathNavigator nav, TestLabel label); private static void Run(string name, TestFunction func) { var label = new TestLabel(name); TestUtil.RunWithTempFile(filename => { var fileMap = new ExeConfigurationFileMap(); fileMap.ExeConfigFilename = filename; var config = ConfigurationManager.OpenMappedExeConfiguration( fileMap, ConfigurationUserLevel.None); func(config, label); }); } private static void Run<TConfig>(string name, TestFunction func) where TConfig : ConfigProvider, new() { Run<TConfig>(new TestLabel(name), func, null); } private static void Run<TConfig>(TestLabel label, TestFunction func) where TConfig : ConfigProvider, new() { Run<TConfig>(label, func, null); } private static void Run<TConfig>( string name, TestFunction func, XmlCheckFunction check) where TConfig : ConfigProvider, new() { Run<TConfig>(new TestLabel(name), func, check); } private static void Run<TConfig>( TestLabel label, TestFunction func, XmlCheckFunction check) where TConfig : ConfigProvider, new() { TestUtil.RunWithTempFiles((parent, filename) => { var provider = new TConfig(); provider.Create(parent); Assert.False(File.Exists(filename)); var config = provider.OpenConfig(parent, filename); Assert.False(File.Exists(filename)); try { label.EnterScope("config"); func(config, label); } finally { label.LeaveScope(); } if (check == null) return; var xml = new XmlDocument(); xml.Load(filename); var nav = 
xml.CreateNavigator().SelectSingleNode("/configuration"); try { label.EnterScope("xml"); check(nav, label); } finally { label.LeaveScope(); } }); } #endregion #region Assertion Helpers static void AssertNotModified(MySection my, TestLabel label) { label.EnterScope("modified"); Assert.NotNull(my); Assert.False(my.IsModified, label.Get()); Assert.NotNull(my.List); Assert.Equal(0, my.List.Collection.Count); Assert.False(my.List.IsModified, label.Get()); label.LeaveScope(); } static void AssertListElement(XPathNavigator nav, TestLabel label) { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); label.EnterScope("children"); Assert.True(my.HasChildren, label.Get()); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); Assert.True(iter2.MoveNext(), label.Get()); var test = iter2.Current; label.EnterScope("test"); Assert.Equal("test", test.Name); Assert.False(test.HasChildren, label.Get()); Assert.True(test.HasAttributes, label.Get()); var attr = test.GetAttribute("Hello", string.Empty); Assert.Equal("29", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); } #endregion #region Tests [Fact] public void DefaultValues() { Run<DefaultMachineConfig>("DefaultValues", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }); } [Fact] public void AddDefaultListElement() { Run<DefaultMachineConfig>("AddDefaultListElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); }); } [Fact] public void AddDefaultListElement2() { Run<DefaultMachineConfig>("AddDefaultListElement2", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); config.Save(ConfigurationSaveMode.Modified); Assert.True(File.Exists(config.FilePath), label.Get()); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.False(my.HasChildren, label.Get()); label.LeaveScope(); }); } [Fact] public void AddDefaultListElement3() { Run<DefaultMachineConfig>("AddDefaultListElement3", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = 
my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); config.Save(ConfigurationSaveMode.Full); Assert.True(File.Exists(config.FilePath), label.Get()); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); label.EnterScope("children"); Assert.True(my.HasChildren, label.Get()); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(2, iter2.Count); label.EnterScope("list"); var iter3 = my.Select("list/*"); Assert.Equal(1, iter3.Count); Assert.True(iter3.MoveNext(), label.Get()); var collection = iter3.Current; Assert.Equal("collection", collection.Name); Assert.False(collection.HasChildren, label.Get()); Assert.True(collection.HasAttributes, label.Get()); var hello = collection.GetAttribute("Hello", string.Empty); Assert.Equal("8", hello); var world = collection.GetAttribute("World", string.Empty); Assert.Equal("0", world); label.LeaveScope(); label.EnterScope("test"); var iter4 = my.Select("test"); Assert.Equal(1, iter4.Count); Assert.True(iter4.MoveNext(), label.Get()); var test = iter4.Current; Assert.Equal("test", test.Name); Assert.False(test.HasChildren, label.Get()); Assert.True(test.HasAttributes, label.Get()); var hello2 = test.GetAttribute("Hello", string.Empty); Assert.Equal("8", hello2); var world2 = test.GetAttribute("World", string.Empty); Assert.Equal("0", world2); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } [Fact] public void AddListElement() { Run<DefaultMachineConfig>("AddListElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); my.Test.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { AssertListElement(nav, label); }); } [Fact] public void NotModifiedAfterSave() { Run<DefaultMachineConfig>("NotModifiedAfterSave", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); label.EnterScope("1st-save"); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Modified); Assert.False(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); label.EnterScope("modify"); element.Hello = 12; Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.True(element.IsModified, label.Get()); label.LeaveScope(); label.EnterScope("2nd-save"); config.Save(ConfigurationSaveMode.Modified); Assert.True(File.Exists(config.FilePath), label.Get()); Assert.False(my.IsModified, label.Get()); Assert.False(my.List.IsModified, label.Get()); Assert.False(my.List.Collection.IsModified, label.Get()); 
Assert.False(element.IsModified, label.Get()); label.LeaveScope(); // 2nd-save }); } [Fact] public void AddSection() { Run("AddSection", (config, label) => { Assert.Null(config.Sections["my"]); var my = new MySection(); config.Sections.Add("my2", my); config.Save(ConfigurationSaveMode.Full); Assert.True(File.Exists(config.FilePath), label.Get()); }); } [Fact] public void AddElement() { Run<DefaultMachineConfig>("AddElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); var element = my.List.DefaultCollection.AddElement(); element.Hello = 12; config.Save(ConfigurationSaveMode.Modified); label.EnterScope("file"); Assert.True(File.Exists(config.FilePath), "#c2"); label.LeaveScope(); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.True(my.HasChildren, label.Get()); label.EnterScope("children"); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); Assert.True(iter2.MoveNext(), label.Get()); var list = iter2.Current; label.EnterScope("list"); Assert.Equal("list", list.Name); Assert.False(list.HasChildren, label.Get()); Assert.True(list.HasAttributes, label.Get()); var attr = list.GetAttribute("Hello", string.Empty); Assert.Equal("12", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } [Fact] public void ModifyListElement() { Run<RoamingAndExe>("ModifyListElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); my.Test.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }); } [Fact] public void ModifyListElement2() { Run<RoamingAndExe>("ModifyListElement2", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); my.Test.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Modified); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { AssertListElement(nav, label); }); } [Fact] public void TestElementWithCollection() { Run<DefaultMachineConfig2>("TestElementWithCollection", (config, label) => { label.EnterScope("section"); var my2 = config.Sections["my2"] as MySection2; Assert.NotNull(my2); Assert.NotNull(my2.Test); Assert.NotNull(my2.Test.DefaultCollection); Assert.Equal(0, my2.Test.DefaultCollection.Count); label.LeaveScope(); my2.Test.DefaultCollection.AddElement(); my2.Element.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my2"); Assert.Equal("my2", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.True(my.HasChildren, label.Get()); label.EnterScope("children"); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); 
Assert.True(iter2.MoveNext(), label.Get()); var element = iter2.Current; label.EnterScope("element"); Assert.Equal("element", element.Name); Assert.False(element.HasChildren, label.Get()); Assert.True(element.HasAttributes, label.Get()); var attr = element.GetAttribute("Hello", string.Empty); Assert.Equal("29", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } [Fact] public void TestElementWithCollection2() { Run<DefaultMachineConfig2>("TestElementWithCollection2", (config, label) => { label.EnterScope("section"); var my2 = config.Sections["my2"] as MySection2; Assert.NotNull(my2); Assert.NotNull(my2.Test); Assert.NotNull(my2.Test.DefaultCollection); Assert.Equal(0, my2.Test.DefaultCollection.Count); label.LeaveScope(); var element = my2.Test.DefaultCollection.AddElement(); var element2 = element.Test.DefaultCollection.AddElement(); element2.Hello = 1; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my2"); Assert.Equal("my2", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.True(my.HasChildren, label.Get()); label.EnterScope("children"); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); Assert.True(iter2.MoveNext(), label.Get()); var collection = iter2.Current; label.EnterScope("collection"); Assert.Equal("collection", collection.Name); Assert.True(collection.HasChildren, label.Get()); Assert.False(collection.HasAttributes, label.Get()); label.EnterScope("children"); var iter3 = collection.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter3.Count); Assert.True(iter3.MoveNext(), label.Get()); var element = iter3.Current; label.EnterScope("element"); Assert.Equal("test", element.Name); Assert.False(element.HasChildren, label.Get()); Assert.True(element.HasAttributes, label.Get()); var attr = element.GetAttribute("Hello", string.Empty); Assert.Equal("1", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } #endregion #region Configuration Classes public class MyElement : ConfigurationElement { [ConfigurationProperty("Hello", DefaultValue = 8)] public int Hello { get { return (int)base["Hello"]; } set { base["Hello"] = value; } } [ConfigurationProperty("World", IsRequired = false)] public int World { get { return (int)base["World"]; } set { base["World"] = value; } } public new bool IsModified { get { return base.IsModified(); } } } public class MyCollection<T> : ConfigurationElementCollection where T : ConfigurationElement, new() { #region implemented abstract members of ConfigurationElementCollection protected override ConfigurationElement CreateNewElement() { return new T(); } protected override object GetElementKey(ConfigurationElement element) { return ((T)element).GetHashCode(); } #endregion public override ConfigurationElementCollectionType CollectionType { get { return ConfigurationElementCollectionType.BasicMap; } } public T AddElement() { var element = new T(); BaseAdd(element); return element; } public void RemoveElement(T element) { BaseRemove(GetElementKey(element)); } public new bool IsModified { get { return base.IsModified(); } } } public class 
MyCollectionElement<T> : ConfigurationElement where T : ConfigurationElement, new() { [ConfigurationProperty("", Options = ConfigurationPropertyOptions.IsDefaultCollection, IsDefaultCollection = true)] public MyCollection<T> DefaultCollection { get { return (MyCollection<T>)this[string.Empty]; } set { this[string.Empty] = value; } } [ConfigurationProperty("collection", Options = ConfigurationPropertyOptions.None)] public MyCollection<T> Collection { get { return (MyCollection<T>)this["collection"]; } set { this["collection"] = value; } } public new bool IsModified { get { return base.IsModified(); } } } public class MySection : ConfigurationSection { [ConfigurationProperty("list", Options = ConfigurationPropertyOptions.None)] public MyCollectionElement<MyElement> List { get { return (MyCollectionElement<MyElement>)this["list"]; } } [ConfigurationProperty("test", Options = ConfigurationPropertyOptions.None)] public MyElement Test { get { return (MyElement)this["test"]; } } public new bool IsModified { get { return base.IsModified(); } } } public class MyElementWithCollection : ConfigurationElement { [ConfigurationProperty("test")] public MyCollectionElement<MyElement> Test { get { return (MyCollectionElement<MyElement>)this["test"]; } } } public class MySection2 : ConfigurationSection { [ConfigurationProperty("collection", Options = ConfigurationPropertyOptions.None)] public MyCollectionElement<MyElementWithCollection> Test { get { return (MyCollectionElement<MyElementWithCollection>)this["collection"]; } } [ConfigurationProperty("element", Options = ConfigurationPropertyOptions.None)] public MyElement Element { get { return (MyElement)this["element"]; } } } public class MySectionGroup : ConfigurationSectionGroup { public MySection2 My2 { get { return (MySection2)Sections["my2"]; } } } #endregion } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // ConfigurationSaveTest.cs // // Author: // Martin Baulig <[email protected]> // // Copyright (c) 2012 Xamarin Inc. (http://www.xamarin.com) // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. using System; using System.IO; using System.Xml; using System.Xml.Schema; using System.Xml.XPath; using System.Text; using System.Reflection; using System.Globalization; using System.Configuration; using System.Collections.Generic; using SysConfig = System.Configuration.Configuration; using Xunit; namespace MonoTests.System.Configuration { using Util; public class ConfigurationSaveTest { #region Test Framework public abstract class ConfigProvider { public void Create(string filename) { if (File.Exists(filename)) File.Delete(filename); var settings = new XmlWriterSettings(); settings.Indent = true; using (var writer = XmlTextWriter.Create(filename, settings)) { writer.WriteStartElement("configuration"); WriteXml(writer); writer.WriteEndElement(); } } public abstract UserLevel Level { get; } public enum UserLevel { MachineAndExe, RoamingAndExe } public virtual SysConfig OpenConfig(string parentFile, string configFile) { ConfigurationUserLevel level; var map = new ExeConfigurationFileMap(); switch (Level) { case UserLevel.MachineAndExe: map.ExeConfigFilename = configFile; map.MachineConfigFilename = parentFile; level = ConfigurationUserLevel.None; break; case UserLevel.RoamingAndExe: map.RoamingUserConfigFilename = configFile; map.ExeConfigFilename = parentFile; level = ConfigurationUserLevel.PerUserRoaming; break; default: throw new InvalidOperationException(); } return ConfigurationManager.OpenMappedExeConfiguration(map, level); } protected abstract void WriteXml(XmlWriter writer); } public abstract class MachineConfigProvider : ConfigProvider { protected override void WriteXml(XmlWriter writer) { writer.WriteStartElement("configSections"); WriteSections(writer); writer.WriteEndElement(); WriteValues(writer); } public override UserLevel Level { get { return UserLevel.MachineAndExe; } } protected abstract void WriteSections(XmlWriter writer); protected abstract void WriteValues(XmlWriter writer); } class DefaultMachineConfig : MachineConfigProvider { protected override void WriteSections(XmlWriter writer) { writer.WriteStartElement("section"); writer.WriteAttributeString("name", "my"); writer.WriteAttributeString("type", 
typeof(MySection).AssemblyQualifiedName); writer.WriteAttributeString("allowLocation", "true"); writer.WriteAttributeString("allowDefinition", "Everywhere"); writer.WriteAttributeString("allowExeDefinition", "MachineToRoamingUser"); writer.WriteAttributeString("restartOnExternalChanges", "true"); writer.WriteAttributeString("requirePermission", "true"); writer.WriteEndElement(); } internal static void WriteConfigSections(XmlWriter writer) { var provider = new DefaultMachineConfig(); writer.WriteStartElement("configSections"); provider.WriteSections(writer); writer.WriteEndElement(); } protected override void WriteValues(XmlWriter writer) { writer.WriteStartElement("my"); writer.WriteEndElement(); } } class DefaultMachineConfig2 : MachineConfigProvider { protected override void WriteSections(XmlWriter writer) { writer.WriteStartElement("section"); writer.WriteAttributeString("name", "my2"); writer.WriteAttributeString("type", typeof(MySection2).AssemblyQualifiedName); writer.WriteAttributeString("allowLocation", "true"); writer.WriteAttributeString("allowDefinition", "Everywhere"); writer.WriteAttributeString("allowExeDefinition", "MachineToRoamingUser"); writer.WriteAttributeString("restartOnExternalChanges", "true"); writer.WriteAttributeString("requirePermission", "true"); writer.WriteEndElement(); } internal static void WriteConfigSections(XmlWriter writer) { var provider = new DefaultMachineConfig2(); writer.WriteStartElement("configSections"); provider.WriteSections(writer); writer.WriteEndElement(); } protected override void WriteValues(XmlWriter writer) { } } abstract class ParentProvider : ConfigProvider { protected override void WriteXml(XmlWriter writer) { DefaultMachineConfig.WriteConfigSections(writer); writer.WriteStartElement("my"); writer.WriteStartElement("test"); writer.WriteAttributeString("Hello", "29"); writer.WriteEndElement(); writer.WriteEndElement(); } } class RoamingAndExe : ParentProvider { public override UserLevel Level { get { return UserLevel.RoamingAndExe; } } } private delegate void TestFunction(SysConfig config, TestLabel label); private delegate void XmlCheckFunction(XPathNavigator nav, TestLabel label); private static void Run(string name, TestFunction func) { var label = new TestLabel(name); TestUtil.RunWithTempFile(filename => { var fileMap = new ExeConfigurationFileMap(); fileMap.ExeConfigFilename = filename; var config = ConfigurationManager.OpenMappedExeConfiguration( fileMap, ConfigurationUserLevel.None); func(config, label); }); } private static void Run<TConfig>(string name, TestFunction func) where TConfig : ConfigProvider, new() { Run<TConfig>(new TestLabel(name), func, null); } private static void Run<TConfig>(TestLabel label, TestFunction func) where TConfig : ConfigProvider, new() { Run<TConfig>(label, func, null); } private static void Run<TConfig>( string name, TestFunction func, XmlCheckFunction check) where TConfig : ConfigProvider, new() { Run<TConfig>(new TestLabel(name), func, check); } private static void Run<TConfig>( TestLabel label, TestFunction func, XmlCheckFunction check) where TConfig : ConfigProvider, new() { TestUtil.RunWithTempFiles((parent, filename) => { var provider = new TConfig(); provider.Create(parent); Assert.False(File.Exists(filename)); var config = provider.OpenConfig(parent, filename); Assert.False(File.Exists(filename)); try { label.EnterScope("config"); func(config, label); } finally { label.LeaveScope(); } if (check == null) return; var xml = new XmlDocument(); xml.Load(filename); var nav = 
xml.CreateNavigator().SelectSingleNode("/configuration"); try { label.EnterScope("xml"); check(nav, label); } finally { label.LeaveScope(); } }); } #endregion #region Assertion Helpers static void AssertNotModified(MySection my, TestLabel label) { label.EnterScope("modified"); Assert.NotNull(my); Assert.False(my.IsModified, label.Get()); Assert.NotNull(my.List); Assert.Equal(0, my.List.Collection.Count); Assert.False(my.List.IsModified, label.Get()); label.LeaveScope(); } static void AssertListElement(XPathNavigator nav, TestLabel label) { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); label.EnterScope("children"); Assert.True(my.HasChildren, label.Get()); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); Assert.True(iter2.MoveNext(), label.Get()); var test = iter2.Current; label.EnterScope("test"); Assert.Equal("test", test.Name); Assert.False(test.HasChildren, label.Get()); Assert.True(test.HasAttributes, label.Get()); var attr = test.GetAttribute("Hello", string.Empty); Assert.Equal("29", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); } #endregion #region Tests [Fact] public void DefaultValues() { Run<DefaultMachineConfig>("DefaultValues", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }); } [Fact] public void AddDefaultListElement() { Run<DefaultMachineConfig>("AddDefaultListElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); }); } [Fact] public void AddDefaultListElement2() { Run<DefaultMachineConfig>("AddDefaultListElement2", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); config.Save(ConfigurationSaveMode.Modified); Assert.True(File.Exists(config.FilePath), label.Get()); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.False(my.HasChildren, label.Get()); label.LeaveScope(); }); } [Fact] public void AddDefaultListElement3() { Run<DefaultMachineConfig>("AddDefaultListElement3", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = 
my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); config.Save(ConfigurationSaveMode.Full); Assert.True(File.Exists(config.FilePath), label.Get()); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); label.EnterScope("children"); Assert.True(my.HasChildren, label.Get()); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(2, iter2.Count); label.EnterScope("list"); var iter3 = my.Select("list/*"); Assert.Equal(1, iter3.Count); Assert.True(iter3.MoveNext(), label.Get()); var collection = iter3.Current; Assert.Equal("collection", collection.Name); Assert.False(collection.HasChildren, label.Get()); Assert.True(collection.HasAttributes, label.Get()); var hello = collection.GetAttribute("Hello", string.Empty); Assert.Equal("8", hello); var world = collection.GetAttribute("World", string.Empty); Assert.Equal("0", world); label.LeaveScope(); label.EnterScope("test"); var iter4 = my.Select("test"); Assert.Equal(1, iter4.Count); Assert.True(iter4.MoveNext(), label.Get()); var test = iter4.Current; Assert.Equal("test", test.Name); Assert.False(test.HasChildren, label.Get()); Assert.True(test.HasAttributes, label.Get()); var hello2 = test.GetAttribute("Hello", string.Empty); Assert.Equal("8", hello2); var world2 = test.GetAttribute("World", string.Empty); Assert.Equal("0", world2); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } [Fact] public void AddListElement() { Run<DefaultMachineConfig>("AddListElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); my.Test.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { AssertListElement(nav, label); }); } [Fact] public void NotModifiedAfterSave() { Run<DefaultMachineConfig>("NotModifiedAfterSave", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); label.EnterScope("add"); var element = my.List.Collection.AddElement(); Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.False(element.IsModified, label.Get()); label.LeaveScope(); label.EnterScope("1st-save"); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Modified); Assert.False(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); label.EnterScope("modify"); element.Hello = 12; Assert.True(my.IsModified, label.Get()); Assert.True(my.List.IsModified, label.Get()); Assert.True(my.List.Collection.IsModified, label.Get()); Assert.True(element.IsModified, label.Get()); label.LeaveScope(); label.EnterScope("2nd-save"); config.Save(ConfigurationSaveMode.Modified); Assert.True(File.Exists(config.FilePath), label.Get()); Assert.False(my.IsModified, label.Get()); Assert.False(my.List.IsModified, label.Get()); Assert.False(my.List.Collection.IsModified, label.Get()); 
Assert.False(element.IsModified, label.Get()); label.LeaveScope(); // 2nd-save }); } [Fact] public void AddSection() { Run("AddSection", (config, label) => { Assert.Null(config.Sections["my"]); var my = new MySection(); config.Sections.Add("my2", my); config.Save(ConfigurationSaveMode.Full); Assert.True(File.Exists(config.FilePath), label.Get()); }); } [Fact] public void AddElement() { Run<DefaultMachineConfig>("AddElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); var element = my.List.DefaultCollection.AddElement(); element.Hello = 12; config.Save(ConfigurationSaveMode.Modified); label.EnterScope("file"); Assert.True(File.Exists(config.FilePath), "#c2"); label.LeaveScope(); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my"); Assert.Equal("my", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.True(my.HasChildren, label.Get()); label.EnterScope("children"); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); Assert.True(iter2.MoveNext(), label.Get()); var list = iter2.Current; label.EnterScope("list"); Assert.Equal("list", list.Name); Assert.False(list.HasChildren, label.Get()); Assert.True(list.HasAttributes, label.Get()); var attr = list.GetAttribute("Hello", string.Empty); Assert.Equal("12", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } [Fact] public void ModifyListElement() { Run<RoamingAndExe>("ModifyListElement", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); my.Test.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.False(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }); } [Fact] public void ModifyListElement2() { Run<RoamingAndExe>("ModifyListElement2", (config, label) => { var my = config.Sections["my"] as MySection; AssertNotModified(my, label); my.Test.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Modified); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { AssertListElement(nav, label); }); } [Fact] public void TestElementWithCollection() { Run<DefaultMachineConfig2>("TestElementWithCollection", (config, label) => { label.EnterScope("section"); var my2 = config.Sections["my2"] as MySection2; Assert.NotNull(my2); Assert.NotNull(my2.Test); Assert.NotNull(my2.Test.DefaultCollection); Assert.Equal(0, my2.Test.DefaultCollection.Count); label.LeaveScope(); my2.Test.DefaultCollection.AddElement(); my2.Element.Hello = 29; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my2"); Assert.Equal("my2", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.True(my.HasChildren, label.Get()); label.EnterScope("children"); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); 
Assert.True(iter2.MoveNext(), label.Get()); var element = iter2.Current; label.EnterScope("element"); Assert.Equal("element", element.Name); Assert.False(element.HasChildren, label.Get()); Assert.True(element.HasAttributes, label.Get()); var attr = element.GetAttribute("Hello", string.Empty); Assert.Equal("29", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } [Fact] public void TestElementWithCollection2() { Run<DefaultMachineConfig2>("TestElementWithCollection2", (config, label) => { label.EnterScope("section"); var my2 = config.Sections["my2"] as MySection2; Assert.NotNull(my2); Assert.NotNull(my2.Test); Assert.NotNull(my2.Test.DefaultCollection); Assert.Equal(0, my2.Test.DefaultCollection.Count); label.LeaveScope(); var element = my2.Test.DefaultCollection.AddElement(); var element2 = element.Test.DefaultCollection.AddElement(); element2.Hello = 1; label.EnterScope("file"); Assert.False(File.Exists(config.FilePath), label.Get()); config.Save(ConfigurationSaveMode.Minimal); Assert.True(File.Exists(config.FilePath), label.Get()); label.LeaveScope(); }, (nav, label) => { Assert.True(nav.HasChildren, label.Get()); var iter = nav.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter.Count); Assert.True(iter.MoveNext(), label.Get()); var my = iter.Current; label.EnterScope("my2"); Assert.Equal("my2", my.Name); Assert.False(my.HasAttributes, label.Get()); Assert.True(my.HasChildren, label.Get()); label.EnterScope("children"); var iter2 = my.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter2.Count); Assert.True(iter2.MoveNext(), label.Get()); var collection = iter2.Current; label.EnterScope("collection"); Assert.Equal("collection", collection.Name); Assert.True(collection.HasChildren, label.Get()); Assert.False(collection.HasAttributes, label.Get()); label.EnterScope("children"); var iter3 = collection.SelectChildren(XPathNodeType.Element); Assert.Equal(1, iter3.Count); Assert.True(iter3.MoveNext(), label.Get()); var element = iter3.Current; label.EnterScope("element"); Assert.Equal("test", element.Name); Assert.False(element.HasChildren, label.Get()); Assert.True(element.HasAttributes, label.Get()); var attr = element.GetAttribute("Hello", string.Empty); Assert.Equal("1", attr); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); label.LeaveScope(); }); } #endregion #region Configuration Classes public class MyElement : ConfigurationElement { [ConfigurationProperty("Hello", DefaultValue = 8)] public int Hello { get { return (int)base["Hello"]; } set { base["Hello"] = value; } } [ConfigurationProperty("World", IsRequired = false)] public int World { get { return (int)base["World"]; } set { base["World"] = value; } } public new bool IsModified { get { return base.IsModified(); } } } public class MyCollection<T> : ConfigurationElementCollection where T : ConfigurationElement, new() { #region implemented abstract members of ConfigurationElementCollection protected override ConfigurationElement CreateNewElement() { return new T(); } protected override object GetElementKey(ConfigurationElement element) { return ((T)element).GetHashCode(); } #endregion public override ConfigurationElementCollectionType CollectionType { get { return ConfigurationElementCollectionType.BasicMap; } } public T AddElement() { var element = new T(); BaseAdd(element); return element; } public void RemoveElement(T element) { BaseRemove(GetElementKey(element)); } public new bool IsModified { get { return base.IsModified(); } } } public class 
MyCollectionElement<T> : ConfigurationElement where T : ConfigurationElement, new() { [ConfigurationProperty("", Options = ConfigurationPropertyOptions.IsDefaultCollection, IsDefaultCollection = true)] public MyCollection<T> DefaultCollection { get { return (MyCollection<T>)this[string.Empty]; } set { this[string.Empty] = value; } } [ConfigurationProperty("collection", Options = ConfigurationPropertyOptions.None)] public MyCollection<T> Collection { get { return (MyCollection<T>)this["collection"]; } set { this["collection"] = value; } } public new bool IsModified { get { return base.IsModified(); } } } public class MySection : ConfigurationSection { [ConfigurationProperty("list", Options = ConfigurationPropertyOptions.None)] public MyCollectionElement<MyElement> List { get { return (MyCollectionElement<MyElement>)this["list"]; } } [ConfigurationProperty("test", Options = ConfigurationPropertyOptions.None)] public MyElement Test { get { return (MyElement)this["test"]; } } public new bool IsModified { get { return base.IsModified(); } } } public class MyElementWithCollection : ConfigurationElement { [ConfigurationProperty("test")] public MyCollectionElement<MyElement> Test { get { return (MyCollectionElement<MyElement>)this["test"]; } } } public class MySection2 : ConfigurationSection { [ConfigurationProperty("collection", Options = ConfigurationPropertyOptions.None)] public MyCollectionElement<MyElementWithCollection> Test { get { return (MyCollectionElement<MyElementWithCollection>)this["collection"]; } } [ConfigurationProperty("element", Options = ConfigurationPropertyOptions.None)] public MyElement Element { get { return (MyElement)this["element"]; } } } public class MySectionGroup : ConfigurationSectionGroup { public MySection2 My2 { get { return (MySection2)Sections["my2"]; } } } #endregion } }
-1
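The test file in the record above exercises System.Configuration's save modes: Minimal writes nothing unless a value actually differs from its inherited default, Modified persists what was touched since the last save, and Full writes every property. A minimal standalone sketch of that behavior, assuming .NET Framework or a compatible System.Configuration package; the config file name and the appSettings key are hypothetical, chosen only for illustration:

using System;
using System.Configuration;
using System.IO;

static class SaveModeSketch
{
    static void Main()
    {
        // Map a hypothetical config file instead of the default app.config.
        var map = new ExeConfigurationFileMap { ExeConfigFilename = "demo.config" };
        Configuration config = ConfigurationManager.OpenMappedExeConfiguration(
            map, ConfigurationUserLevel.None);

        // Nothing has been modified yet, so a Minimal save writes no file,
        // mirroring the DefaultValues test in the record above.
        config.Save(ConfigurationSaveMode.Minimal);
        Console.WriteLine(File.Exists(config.FilePath)); // False

        // After a real change, a Minimal save persists just the changed value.
        config.AppSettings.Settings.Add("Hello", "29");
        config.Save(ConfigurationSaveMode.Minimal);
        Console.WriteLine(File.Exists(config.FilePath)); // True
    }
}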
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b74937/b74937.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
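The records for PR 66282 all concern when the ARM32 JIT may emit a fast tail call, i.e. reuse the caller's stack frame for the callee. The restrictions listed in the description are internal JIT decisions and cannot be requested from C#; the sketch below only illustrates what a call in tail position looks like, with a deliberately small depth since C# does not guarantee the optimization:

static class TailCallSketch
{
    // The recursive call is in tail position: the caller returns the callee's
    // result and needs nothing from its own frame afterwards, which is what
    // makes frame reuse (a fast tail call) possible in the first place.
    static long Sum(long acc, int n) => n == 0 ? acc : Sum(acc + n, n - 1);

    // Kept small on purpose: without the optimization each call consumes a
    // stack frame, and per this PR the JIT must decline the fast path when,
    // for example, writing the callee's stack arguments would clobber the
    // caller's incoming ones.
    static void Main() => System.Console.WriteLine(Sum(0, 10)); // 55
}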
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/libraries/System.Security.Cryptography/src/System/Security/Cryptography/RSAOAEPKeyExchangeDeformatter.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Security.Cryptography { public class RSAOAEPKeyExchangeDeformatter : AsymmetricKeyExchangeDeformatter { private RSA? _rsaKey; public RSAOAEPKeyExchangeDeformatter() { } public RSAOAEPKeyExchangeDeformatter(AsymmetricAlgorithm key!!) { _rsaKey = (RSA)key; } public override string? Parameters { get { return null; } set { } } public override byte[] DecryptKeyExchange(byte[] rgbData) { if (_rsaKey == null) throw new CryptographicUnexpectedOperationException(SR.Cryptography_FormatterMissingKey); return _rsaKey.Decrypt(rgbData, RSAEncryptionPadding.OaepSHA1); } public override void SetKey(AsymmetricAlgorithm key!!) { _rsaKey = (RSA)key; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Security.Cryptography { public class RSAOAEPKeyExchangeDeformatter : AsymmetricKeyExchangeDeformatter { private RSA? _rsaKey; public RSAOAEPKeyExchangeDeformatter() { } public RSAOAEPKeyExchangeDeformatter(AsymmetricAlgorithm key!!) { _rsaKey = (RSA)key; } public override string? Parameters { get { return null; } set { } } public override byte[] DecryptKeyExchange(byte[] rgbData) { if (_rsaKey == null) throw new CryptographicUnexpectedOperationException(SR.Cryptography_FormatterMissingKey); return _rsaKey.Decrypt(rgbData, RSAEncryptionPadding.OaepSHA1); } public override void SetKey(AsymmetricAlgorithm key!!) { _rsaKey = (RSA)key; } } }
-1
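RSAOAEPKeyExchangeDeformatter, shown unchanged in the record above, is the decrypting half of the OAEP key-exchange pair; the !! suffix on its constructor parameters is the parameter null-check syntax the runtime was experimenting with at the time. A minimal round-trip sketch, assuming .NET 6 or later for RandomNumberGenerator.GetBytes and Convert.ToHexString:

using System;
using System.Security.Cryptography;

static class KeyExchangeSketch
{
    static void Main()
    {
        using RSA rsa = RSA.Create();

        // A throwaway 256-bit session key standing in for, say, an AES key.
        byte[] sessionKey = RandomNumberGenerator.GetBytes(32);

        // The formatter encrypts (wraps) the session key with RSA-OAEP...
        var formatter = new RSAOAEPKeyExchangeFormatter(rsa);
        byte[] exchanged = formatter.CreateKeyExchange(sessionKey);

        // ...and the deformatter recovers it with the matching private key.
        var deformatter = new RSAOAEPKeyExchangeDeformatter(rsa);
        byte[] recovered = deformatter.DecryptKeyExchange(exchanged);

        Console.WriteLine(Convert.ToHexString(recovered) == Convert.ToHexString(sessionKey)); // True
    }
}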
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/libraries/System.Private.Xml/src/System/Xml/Schema/XsdDateTime.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Xml.Schema { using System; using System.Xml; using System.Diagnostics; using System.Text; /// <summary> /// This enum specifies what format should be used when converting string to XsdDateTime /// </summary> [Flags] internal enum XsdDateTimeFlags { DateTime = 0x01, Time = 0x02, Date = 0x04, GYearMonth = 0x08, GYear = 0x10, GMonthDay = 0x20, GDay = 0x40, GMonth = 0x80, XdrDateTimeNoTz = 0x100, XdrDateTime = 0x200, XdrTimeNoTz = 0x400, //XDRTime with tz is the same as xsd:time AllXsd = 0xFF //All still does not include the XDR formats } /// <summary> /// This structure extends System.DateTime to support timeInTicks zone and Gregorian types components of an Xsd Duration. It is used internally to support Xsd durations without loss /// of fidelity. XsdDuration structures are immutable once they've been created. /// </summary> internal struct XsdDateTime { // DateTime is being used as an internal representation only // Casting XsdDateTime to DateTime might return a different value private DateTime _dt; // Additional information that DateTime is not preserving // Information is stored in the following format: // Bits Info // 31-24 DateTimeTypeCode // 23-16 XsdDateTimeKind // 15-8 Zone Hours // 7-0 Zone Minutes private uint _extra; // Subset of XML Schema types XsdDateTime represents private enum DateTimeTypeCode { DateTime, Time, Date, GYearMonth, GYear, GMonthDay, GDay, GMonth, XdrDateTime, } // Internal representation of DateTimeKind private enum XsdDateTimeKind { Unspecified, Zulu, LocalWestOfZulu, // GMT-1..14, N..Y LocalEastOfZulu // GMT+1..14, A..M } // Masks and shifts used for packing and unpacking extra private const uint TypeMask = 0xFF000000; private const uint KindMask = 0x00FF0000; private const uint ZoneHourMask = 0x0000FF00; private const uint ZoneMinuteMask = 0x000000FF; private const int TypeShift = 24; private const int KindShift = 16; private const int ZoneHourShift = 8; // Maximum number of fraction digits; private const short maxFractionDigits = 7; private const int ticksToFractionDivisor = 10000000; private static readonly int s_lzyyyy = "yyyy".Length; private static readonly int s_lzyyyy_ = "yyyy-".Length; private static readonly int s_lzyyyy_MM = "yyyy-MM".Length; private static readonly int s_lzyyyy_MM_ = "yyyy-MM-".Length; private static readonly int s_lzyyyy_MM_dd = "yyyy-MM-dd".Length; private static readonly int s_lzyyyy_MM_ddT = "yyyy-MM-ddT".Length; private static readonly int s_lzHH = "HH".Length; private static readonly int s_lzHH_ = "HH:".Length; private static readonly int s_lzHH_mm = "HH:mm".Length; private static readonly int s_lzHH_mm_ = "HH:mm:".Length; private static readonly int s_lzHH_mm_ss = "HH:mm:ss".Length; private static readonly int s_Lz_ = "-".Length; private static readonly int s_lz_zz = "-zz".Length; private static readonly int s_lz_zz_ = "-zz:".Length; private static readonly int s_lz_zz_zz = "-zz:zz".Length; private static readonly int s_Lz__ = "--".Length; private static readonly int s_lz__mm = "--MM".Length; private static readonly int s_lz__mm_ = "--MM-".Length; private static readonly int s_lz__mm__ = "--MM--".Length; private static readonly int s_lz__mm_dd = "--MM-dd".Length; private static readonly int s_Lz___ = "---".Length; private static readonly int s_lz___dd = "---dd".Length; // These values were copied from the DateTime class and are // needed to convert ticks to year, month and 
day. See comment // for method GetYearMonthDay for rationale. // Number of 100ns ticks per time unit private const long TicksPerMillisecond = 10000; private const long TicksPerSecond = TicksPerMillisecond * 1000; private const long TicksPerMinute = TicksPerSecond * 60; private const long TicksPerHour = TicksPerMinute * 60; private const long TicksPerDay = TicksPerHour * 24; // Number of days in a non-leap year private const int DaysPerYear = 365; // Number of days in 4 years private const int DaysPer4Years = DaysPerYear * 4 + 1; // 1461 // Number of days in 100 years private const int DaysPer100Years = DaysPer4Years * 25 - 1; // 36524 // Number of days in 400 years private const int DaysPer400Years = DaysPer100Years * 4 + 1; // 146097 private static readonly int[] DaysToMonth365 = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}; private static readonly int[] DaysToMonth366 = { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}; /// <summary> /// Constructs an XsdDateTime from a string using specific format. /// </summary> public XsdDateTime(string text, XsdDateTimeFlags kinds) : this() { Parser parser = default; if (!parser.Parse(text, kinds)) { throw new FormatException(SR.Format(SR.XmlConvert_BadFormat, text, kinds)); } InitiateXsdDateTime(parser); } private XsdDateTime(Parser parser) : this() { InitiateXsdDateTime(parser); } private void InitiateXsdDateTime(Parser parser) { _dt = new DateTime(parser.year, parser.month, parser.day, parser.hour, parser.minute, parser.second); if (parser.fraction != 0) { _dt = _dt.AddTicks(parser.fraction); } _extra = (uint)(((int)parser.typeCode << TypeShift) | ((int)parser.kind << KindShift) | (parser.zoneHour << ZoneHourShift) | parser.zoneMinute); } internal static bool TryParse(string text, XsdDateTimeFlags kinds, out XsdDateTime result) { Parser parser = default; if (!parser.Parse(text, kinds)) { result = default; return false; } result = new XsdDateTime(parser); return true; } /// <summary> /// Constructs an XsdDateTime from a DateTime. 
/// </summary> public XsdDateTime(DateTime dateTime, XsdDateTimeFlags kinds) { Debug.Assert(Bits.ExactlyOne((uint)kinds), "Only one DateTime type code can be set."); _dt = dateTime; DateTimeTypeCode code = (DateTimeTypeCode)(Bits.LeastPosition((uint)kinds) - 1); int zoneHour = 0; int zoneMinute = 0; XsdDateTimeKind kind; switch (dateTime.Kind) { case DateTimeKind.Unspecified: kind = XsdDateTimeKind.Unspecified; break; case DateTimeKind.Utc: kind = XsdDateTimeKind.Zulu; break; default: { Debug.Assert(dateTime.Kind == DateTimeKind.Local, $"Unknown DateTimeKind: {dateTime.Kind}"); TimeSpan utcOffset = TimeZoneInfo.Local.GetUtcOffset(dateTime); if (utcOffset.Ticks < 0) { kind = XsdDateTimeKind.LocalWestOfZulu; zoneHour = -utcOffset.Hours; zoneMinute = -utcOffset.Minutes; } else { kind = XsdDateTimeKind.LocalEastOfZulu; zoneHour = utcOffset.Hours; zoneMinute = utcOffset.Minutes; } break; } } _extra = (uint)(((int)code << TypeShift) | ((int)kind << KindShift) | (zoneHour << ZoneHourShift) | zoneMinute); } // Constructs an XsdDateTime from a DateTimeOffset public XsdDateTime(DateTimeOffset dateTimeOffset) : this(dateTimeOffset, XsdDateTimeFlags.DateTime) { } public XsdDateTime(DateTimeOffset dateTimeOffset, XsdDateTimeFlags kinds) { Debug.Assert(Bits.ExactlyOne((uint)kinds), "Only one DateTime type code can be set."); _dt = dateTimeOffset.DateTime; TimeSpan zoneOffset = dateTimeOffset.Offset; DateTimeTypeCode code = (DateTimeTypeCode)(Bits.LeastPosition((uint)kinds) - 1); XsdDateTimeKind kind; if (zoneOffset.TotalMinutes < 0) { zoneOffset = zoneOffset.Negate(); kind = XsdDateTimeKind.LocalWestOfZulu; } else if (zoneOffset.TotalMinutes > 0) { kind = XsdDateTimeKind.LocalEastOfZulu; } else { kind = XsdDateTimeKind.Zulu; } _extra = (uint)(((int)code << TypeShift) | ((int)kind << KindShift) | (zoneOffset.Hours << ZoneHourShift) | zoneOffset.Minutes); } /// <summary> /// Returns auxiliary enumeration of XSD date type /// </summary> private DateTimeTypeCode InternalTypeCode { get { return (DateTimeTypeCode)((_extra & TypeMask) >> TypeShift); } } /// <summary> /// Returns geographical "position" of the value /// </summary> private XsdDateTimeKind InternalKind { get { return (XsdDateTimeKind)((_extra & KindMask) >> KindShift); } } /// <summary> /// Returns XmlTypeCode of the value being stored /// </summary> public XmlTypeCode TypeCode { get { return s_typeCodes[(int)InternalTypeCode]; } } /// <summary> /// Returns the year part of XsdDateTime /// The returned value is integer between 1 and 9999 /// </summary> public int Year { get { return _dt.Year; } } /// <summary> /// Returns the month part of XsdDateTime /// The returned value is integer between 1 and 12 /// </summary> public int Month { get { return _dt.Month; } } /// <summary> /// Returns the day of the month part of XsdDateTime /// The returned value is integer between 1 and 31 /// </summary> public int Day { get { return _dt.Day; } } /// <summary> /// Returns the hour part of XsdDateTime /// The returned value is integer between 0 and 23 /// </summary> public int Hour { get { return _dt.Hour; } } /// <summary> /// Returns the minute part of XsdDateTime /// The returned value is integer between 0 and 60 /// </summary> public int Minute { get { return _dt.Minute; } } /// <summary> /// Returns the second part of XsdDateTime /// The returned value is integer between 0 and 60 /// </summary> public int Second { get { return _dt.Second; } } /// <summary> /// Returns number of ticks in the fraction of the second /// The returned value is integer between 
0 and 9999999 /// </summary> public int Fraction { get { return (int)(_dt.Ticks % ticksToFractionDivisor); } } /// <summary> /// Returns the hour part of the time zone /// The returned value is integer between -13 and 13 /// </summary> public int ZoneHour { get { uint result = (_extra & ZoneHourMask) >> ZoneHourShift; return (int)result; } } /// <summary> /// Returns the minute part of the time zone /// The returned value is integer between 0 and 60 /// </summary> public int ZoneMinute { get { uint result = (_extra & ZoneMinuteMask); return (int)result; } } public DateTime ToZulu() => InternalKind switch { // set it to UTC XsdDateTimeKind.Zulu => new DateTime(_dt.Ticks, DateTimeKind.Utc), // Adjust to UTC and then convert to local in the current time zone XsdDateTimeKind.LocalEastOfZulu => new DateTime(_dt.Subtract(new TimeSpan(ZoneHour, ZoneMinute, 0)).Ticks, DateTimeKind.Utc), XsdDateTimeKind.LocalWestOfZulu => new DateTime(_dt.Add(new TimeSpan(ZoneHour, ZoneMinute, 0)).Ticks, DateTimeKind.Utc), _ => _dt, }; /// <summary> /// Cast to DateTime /// The following table describes the behaviors of getting the default value /// when a certain year/month/day values are missing. /// /// An "X" means that the value exists. And "--" means that value is missing. /// /// Year Month Day => ResultYear ResultMonth ResultDay Note /// /// X X X Parsed year Parsed month Parsed day /// X X -- Parsed Year Parsed month First day If we have year and month, assume the first day of that month. /// X -- X Parsed year First month Parsed day If the month is missing, assume first month of that year. /// X -- -- Parsed year First month First day If we have only the year, assume the first day of that year. /// /// -- X X CurrentYear Parsed month Parsed day If the year is missing, assume the current year. /// -- X -- CurrentYear Parsed month First day If we have only a month value, assume the current year and current day. /// -- -- X CurrentYear First month Parsed day If we have only a day value, assume current year and first month. /// -- -- -- CurrentYear Current month Current day So this means that if the date string only contains time, you will get current date. /// </summary> public static implicit operator DateTime(XsdDateTime xdt) { DateTime result; switch (xdt.InternalTypeCode) { case DateTimeTypeCode.GMonth: case DateTimeTypeCode.GDay: result = new DateTime(DateTime.Now.Year, xdt.Month, xdt.Day); break; case DateTimeTypeCode.Time: //back to DateTime.Now DateTime currentDateTime = DateTime.Now; TimeSpan addDiff = new DateTime(currentDateTime.Year, currentDateTime.Month, currentDateTime.Day) - new DateTime(xdt.Year, xdt.Month, xdt.Day); result = xdt._dt.Add(addDiff); break; default: result = xdt._dt; break; } long ticks; switch (xdt.InternalKind) { case XsdDateTimeKind.Zulu: // set it to UTC result = new DateTime(result.Ticks, DateTimeKind.Utc); break; case XsdDateTimeKind.LocalEastOfZulu: // Adjust to UTC and then convert to local in the current time zone ticks = result.Ticks - new TimeSpan(xdt.ZoneHour, xdt.ZoneMinute, 0).Ticks; if (ticks < DateTime.MinValue.Ticks) { // Underflow. 
Return the DateTime as local time directly ticks += TimeZoneInfo.Local.GetUtcOffset(result).Ticks; if (ticks < DateTime.MinValue.Ticks) ticks = DateTime.MinValue.Ticks; return new DateTime(ticks, DateTimeKind.Local); } result = new DateTime(ticks, DateTimeKind.Utc).ToLocalTime(); break; case XsdDateTimeKind.LocalWestOfZulu: // Adjust to UTC and then convert to local in the current time zone ticks = result.Ticks + new TimeSpan(xdt.ZoneHour, xdt.ZoneMinute, 0).Ticks; if (ticks > DateTime.MaxValue.Ticks) { // Overflow. Return the DateTime as local time directly ticks += TimeZoneInfo.Local.GetUtcOffset(result).Ticks; if (ticks > DateTime.MaxValue.Ticks) ticks = DateTime.MaxValue.Ticks; return new DateTime(ticks, DateTimeKind.Local); } result = new DateTime(ticks, DateTimeKind.Utc).ToLocalTime(); break; default: break; } return result; } public static implicit operator DateTimeOffset(XsdDateTime xdt) { DateTime dt; switch (xdt.InternalTypeCode) { case DateTimeTypeCode.GMonth: case DateTimeTypeCode.GDay: dt = new DateTime(DateTime.Now.Year, xdt.Month, xdt.Day); break; case DateTimeTypeCode.Time: //back to DateTime.Now DateTime currentDateTime = DateTime.Now; TimeSpan addDiff = new DateTime(currentDateTime.Year, currentDateTime.Month, currentDateTime.Day) - new DateTime(xdt.Year, xdt.Month, xdt.Day); dt = xdt._dt.Add(addDiff); break; default: dt = xdt._dt; break; } DateTimeOffset result; switch (xdt.InternalKind) { case XsdDateTimeKind.LocalEastOfZulu: result = new DateTimeOffset(dt, new TimeSpan(xdt.ZoneHour, xdt.ZoneMinute, 0)); break; case XsdDateTimeKind.LocalWestOfZulu: result = new DateTimeOffset(dt, new TimeSpan(-xdt.ZoneHour, -xdt.ZoneMinute, 0)); break; case XsdDateTimeKind.Zulu: result = new DateTimeOffset(dt, new TimeSpan(0)); break; case XsdDateTimeKind.Unspecified: default: result = new DateTimeOffset(dt, TimeZoneInfo.Local.GetUtcOffset(dt)); break; } return result; } /// <summary> /// Serialization to a string /// </summary> public override string ToString() { var vsb = new ValueStringBuilder(stackalloc char[64]); switch (InternalTypeCode) { case DateTimeTypeCode.DateTime: PrintDate(ref vsb); vsb.Append('T'); PrintTime(ref vsb); break; case DateTimeTypeCode.Time: PrintTime(ref vsb); break; case DateTimeTypeCode.Date: PrintDate(ref vsb); break; case DateTimeTypeCode.GYearMonth: vsb.AppendSpanFormattable(Year, format: "D4", provider: null); vsb.Append('-'); vsb.AppendSpanFormattable(Month, format: "D2", provider: null); break; case DateTimeTypeCode.GYear: vsb.AppendSpanFormattable(Year, format: "D4", provider: null); break; case DateTimeTypeCode.GMonthDay: vsb.Append("--"); vsb.AppendSpanFormattable(Month, format: "D2", provider: null); vsb.Append('-'); vsb.AppendSpanFormattable(Day, format: "D2", provider: null); break; case DateTimeTypeCode.GDay: vsb.Append("---"); vsb.AppendSpanFormattable(Day, format: "D2", provider: null); break; case DateTimeTypeCode.GMonth: vsb.Append("--"); vsb.AppendSpanFormattable(Month, format: "D2", provider: null); vsb.Append("--"); break; } PrintZone(ref vsb); return vsb.ToString(); } // Serialize year, month and day private void PrintDate(ref ValueStringBuilder vsb) { Span<char> text = vsb.AppendSpan(s_lzyyyy_MM_dd); int year, month, day; GetYearMonthDay(out year, out month, out day); WriteXDigits(text, 0, year, 4); text[s_lzyyyy] = '-'; Write2Digits(text, s_lzyyyy_, month); text[s_lzyyyy_MM] = '-'; Write2Digits(text, s_lzyyyy_MM_, day); } // When printing the date, we need the year, month and the day. 
When // requesting these values from DateTime, it needs to redo the year // calculation before it can calculate the month, and it needs to redo // the year and month calculation before it can calculate the day. This // results in the year being calculated 3 times, the month twice and the // day once. As we know that we need all 3 values, by duplicating the // logic here we can calculate the number of days and return the intermediate // calculations for month and year without the added cost. private void GetYearMonthDay(out int year, out int month, out int day) { long ticks = _dt.Ticks; // n = number of days since 1/1/0001 int n = (int)(ticks / TicksPerDay); // y400 = number of whole 400-year periods since 1/1/0001 int y400 = n / DaysPer400Years; // n = day number within 400-year period n -= y400 * DaysPer400Years; // y100 = number of whole 100-year periods within 400-year period int y100 = n / DaysPer100Years; // Last 100-year period has an extra day, so decrement result if 4 if (y100 == 4) y100 = 3; // n = day number within 100-year period n -= y100 * DaysPer100Years; // y4 = number of whole 4-year periods within 100-year period int y4 = n / DaysPer4Years; // n = day number within 4-year period n -= y4 * DaysPer4Years; // y1 = number of whole years within 4-year period int y1 = n / DaysPerYear; // Last year has an extra day, so decrement result if 4 if (y1 == 4) y1 = 3; year = y400 * 400 + y100 * 100 + y4 * 4 + y1 + 1; // n = day number within year n -= y1 * DaysPerYear; // Leap year calculation looks different from IsLeapYear since y1, y4, // and y100 are relative to year 1, not year 0 bool leapYear = y1 == 3 && (y4 != 24 || y100 == 3); int[] days = leapYear ? DaysToMonth366 : DaysToMonth365; // All months have less than 32 days, so n >> 5 is a good conservative // estimate for the month month = (n >> 5) + 1; // m = 1-based month number while (n >= days[month]) month++; day = n - days[month - 1] + 1; } // Serialize hour, minute, second and fraction private void PrintTime(ref ValueStringBuilder vsb) { Span<char> text = vsb.AppendSpan(s_lzHH_mm_ss); Write2Digits(text, 0, Hour); text[s_lzHH] = ':'; Write2Digits(text, s_lzHH_, Minute); text[s_lzHH_mm] = ':'; Write2Digits(text, s_lzHH_mm_, Second); int fraction = Fraction; if (fraction != 0) { int fractionDigits = maxFractionDigits; while (fraction % 10 == 0) { fractionDigits--; fraction /= 10; } text = vsb.AppendSpan(fractionDigits + 1); text[0] = '.'; WriteXDigits(text, 1, fraction, fractionDigits); } } // Serialize time zone private void PrintZone(ref ValueStringBuilder vsb) { Span<char> text; switch (InternalKind) { case XsdDateTimeKind.Zulu: vsb.Append('Z'); break; case XsdDateTimeKind.LocalWestOfZulu: text = vsb.AppendSpan(s_lz_zz_zz); text[0] = '-'; Write2Digits(text, s_Lz_, ZoneHour); text[s_lz_zz] = ':'; Write2Digits(text, s_lz_zz_, ZoneMinute); break; case XsdDateTimeKind.LocalEastOfZulu: text = vsb.AppendSpan(s_lz_zz_zz); text[0] = '+'; Write2Digits(text, s_Lz_, ZoneHour); text[s_lz_zz] = ':'; Write2Digits(text, s_lz_zz_, ZoneMinute); break; default: // do nothing break; } } // Serialize integer into character Span starting with index [start]. // Number of digits is set by [digits] private static void WriteXDigits(Span<char> text, int start, int value, int digits) { while (digits-- != 0) { text[start + digits] = (char)(value % 10 + '0'); value /= 10; } } // Serialize two digit integer into character Span starting with index [start]. 
private static void Write2Digits(Span<char> text, int start, int value) { text[start] = (char)(value / 10 + '0'); text[start + 1] = (char)(value % 10 + '0'); } private static readonly XmlTypeCode[] s_typeCodes = { XmlTypeCode.DateTime, XmlTypeCode.Time, XmlTypeCode.Date, XmlTypeCode.GYearMonth, XmlTypeCode.GYear, XmlTypeCode.GMonthDay, XmlTypeCode.GDay, XmlTypeCode.GMonth }; // Parsing string according to XML schema spec private struct Parser { private const int leapYear = 1904; private const int firstMonth = 1; private const int firstDay = 1; public DateTimeTypeCode typeCode; public int year; public int month; public int day; public int hour; public int minute; public int second; public int fraction; public XsdDateTimeKind kind; public int zoneHour; public int zoneMinute; private string _text; private int _length; public bool Parse(string text, XsdDateTimeFlags kinds) { _text = text; _length = text.Length; // Skip leading whitespace int start = 0; while (start < _length && char.IsWhiteSpace(text[start])) { start++; } // Choose format starting from the most common and trying not to reparse the same thing too many times if (Test(kinds, XsdDateTimeFlags.DateTime | XsdDateTimeFlags.Date | XsdDateTimeFlags.XdrDateTime | XsdDateTimeFlags.XdrDateTimeNoTz)) { if (ParseDate(start)) { if (Test(kinds, XsdDateTimeFlags.DateTime)) { if (ParseChar(start + s_lzyyyy_MM_dd, 'T') && ParseTimeAndZoneAndWhitespace(start + s_lzyyyy_MM_ddT)) { typeCode = DateTimeTypeCode.DateTime; return true; } } if (Test(kinds, XsdDateTimeFlags.Date)) { if (ParseZoneAndWhitespace(start + s_lzyyyy_MM_dd)) { typeCode = DateTimeTypeCode.Date; return true; } } if (Test(kinds, XsdDateTimeFlags.XdrDateTime)) { if (ParseZoneAndWhitespace(start + s_lzyyyy_MM_dd) || (ParseChar(start + s_lzyyyy_MM_dd, 'T') && ParseTimeAndZoneAndWhitespace(start + s_lzyyyy_MM_ddT))) { typeCode = DateTimeTypeCode.XdrDateTime; return true; } } if (Test(kinds, XsdDateTimeFlags.XdrDateTimeNoTz)) { if (ParseChar(start + s_lzyyyy_MM_dd, 'T')) { if (ParseTimeAndWhitespace(start + s_lzyyyy_MM_ddT)) { typeCode = DateTimeTypeCode.XdrDateTime; return true; } } else { typeCode = DateTimeTypeCode.XdrDateTime; return true; } } } } if (Test(kinds, XsdDateTimeFlags.Time)) { if (ParseTimeAndZoneAndWhitespace(start)) { //Equivalent to NoCurrentDateDefault on DateTimeStyles while parsing xs:time year = leapYear; month = firstMonth; day = firstDay; typeCode = DateTimeTypeCode.Time; return true; } } if (Test(kinds, XsdDateTimeFlags.XdrTimeNoTz)) { if (ParseTimeAndWhitespace(start)) { //Equivalent to NoCurrentDateDefault on DateTimeStyles while parsing xs:time year = leapYear; month = firstMonth; day = firstDay; typeCode = DateTimeTypeCode.Time; return true; } } if (Test(kinds, XsdDateTimeFlags.GYearMonth | XsdDateTimeFlags.GYear)) { if (Parse4Dig(start, ref year) && 1 <= year) { if (Test(kinds, XsdDateTimeFlags.GYearMonth)) { if ( ParseChar(start + s_lzyyyy, '-') && Parse2Dig(start + s_lzyyyy_, ref month) && 1 <= month && month <= 12 && ParseZoneAndWhitespace(start + s_lzyyyy_MM) ) { day = firstDay; typeCode = DateTimeTypeCode.GYearMonth; return true; } } if (Test(kinds, XsdDateTimeFlags.GYear)) { if (ParseZoneAndWhitespace(start + s_lzyyyy)) { month = firstMonth; day = firstDay; typeCode = DateTimeTypeCode.GYear; return true; } } } } if (Test(kinds, XsdDateTimeFlags.GMonthDay | XsdDateTimeFlags.GMonth)) { if ( ParseChar(start, '-') && ParseChar(start + s_Lz_, '-') && Parse2Dig(start + s_Lz__, ref month) && 1 <= month && month <= 12 ) { if (Test(kinds, 
XsdDateTimeFlags.GMonthDay) && ParseChar(start + s_lz__mm, '-')) { if ( Parse2Dig(start + s_lz__mm_, ref day) && 1 <= day && day <= DateTime.DaysInMonth(leapYear, month) && ParseZoneAndWhitespace(start + s_lz__mm_dd) ) { year = leapYear; typeCode = DateTimeTypeCode.GMonthDay; return true; } } if (Test(kinds, XsdDateTimeFlags.GMonth)) { if (ParseZoneAndWhitespace(start + s_lz__mm) || (ParseChar(start + s_lz__mm, '-') && ParseChar(start + s_lz__mm_, '-') && ParseZoneAndWhitespace(start + s_lz__mm__))) { year = leapYear; day = firstDay; typeCode = DateTimeTypeCode.GMonth; return true; } } } } if (Test(kinds, XsdDateTimeFlags.GDay)) { if ( ParseChar(start, '-') && ParseChar(start + s_Lz_, '-') && ParseChar(start + s_Lz__, '-') && Parse2Dig(start + s_Lz___, ref day) && 1 <= day && day <= DateTime.DaysInMonth(leapYear, firstMonth) && ParseZoneAndWhitespace(start + s_lz___dd) ) { year = leapYear; month = firstMonth; typeCode = DateTimeTypeCode.GDay; return true; } } return false; } private bool ParseDate(int start) { return Parse4Dig(start, ref year) && 1 <= year && ParseChar(start + s_lzyyyy, '-') && Parse2Dig(start + s_lzyyyy_, ref month) && 1 <= month && month <= 12 && ParseChar(start + s_lzyyyy_MM, '-') && Parse2Dig(start + s_lzyyyy_MM_, ref day) && 1 <= day && day <= DateTime.DaysInMonth(year, month); } private bool ParseTimeAndZoneAndWhitespace(int start) { if (ParseTime(ref start)) { if (ParseZoneAndWhitespace(start)) { return true; } } return false; } private bool ParseTimeAndWhitespace(int start) { if (ParseTime(ref start)) { while (start < _length) {//&& char.IsWhiteSpace(text[start])) { start++; } return start == _length; } return false; } private static readonly int[] s_power10 = new int[maxFractionDigits] { -1, 10, 100, 1000, 10000, 100000, 1000000 }; private bool ParseTime(ref int start) { if ( Parse2Dig(start, ref hour) && hour < 24 && ParseChar(start + s_lzHH, ':') && Parse2Dig(start + s_lzHH_, ref minute) && minute < 60 && ParseChar(start + s_lzHH_mm, ':') && Parse2Dig(start + s_lzHH_mm_, ref second) && second < 60 ) { start += s_lzHH_mm_ss; if (ParseChar(start, '.')) { // Parse factional part of seconds // We allow any number of digits, but keep only first 7 this.fraction = 0; int fractionDigits = 0; int round = 0; while (++start < _length) { int d = _text[start] - '0'; if (9u < unchecked((uint)d)) { // d < 0 || 9 < d break; } if (fractionDigits < maxFractionDigits) { this.fraction = (this.fraction * 10) + d; } else if (fractionDigits == maxFractionDigits) { if (5 < d) { round = 1; } else if (d == 5) { round = -1; } } else if (round < 0 && d != 0) { round = 1; } fractionDigits++; } if (fractionDigits < maxFractionDigits) { if (fractionDigits == 0) { return false; // cannot end with . 
} fraction *= s_power10[maxFractionDigits - fractionDigits]; } else { if (round < 0) { round = fraction & 1; } fraction += round; } } return true; } // cleanup - conflict with gYear hour = 0; return false; } private bool ParseZoneAndWhitespace(int start) { if (start < _length) { char ch = _text[start]; if (ch == 'Z' || ch == 'z') { kind = XsdDateTimeKind.Zulu; start++; } else if (start + 5 < _length) { if ( Parse2Dig(start + s_Lz_, ref zoneHour) && zoneHour <= 99 && ParseChar(start + s_lz_zz, ':') && Parse2Dig(start + s_lz_zz_, ref zoneMinute) && zoneMinute <= 99 ) { if (ch == '-') { kind = XsdDateTimeKind.LocalWestOfZulu; start += s_lz_zz_zz; } else if (ch == '+') { kind = XsdDateTimeKind.LocalEastOfZulu; start += s_lz_zz_zz; } } } } while (start < _length && char.IsWhiteSpace(_text[start])) { start++; } return start == _length; } private bool Parse4Dig(int start, ref int num) { if (start + 3 < _length) { int d4 = _text[start] - '0'; int d3 = _text[start + 1] - '0'; int d2 = _text[start + 2] - '0'; int d1 = _text[start + 3] - '0'; if (0 <= d4 && d4 < 10 && 0 <= d3 && d3 < 10 && 0 <= d2 && d2 < 10 && 0 <= d1 && d1 < 10 ) { num = ((d4 * 10 + d3) * 10 + d2) * 10 + d1; return true; } } return false; } private bool Parse2Dig(int start, ref int num) { if (start + 1 < _length) { int d2 = _text[start] - '0'; int d1 = _text[start + 1] - '0'; if (0 <= d2 && d2 < 10 && 0 <= d1 && d1 < 10 ) { num = d2 * 10 + d1; return true; } } return false; } private bool ParseChar(int start, char ch) { return start < _length && _text[start] == ch; } private static bool Test(XsdDateTimeFlags left, XsdDateTimeFlags right) { return (left & right) != 0; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Xml.Schema { using System; using System.Xml; using System.Diagnostics; using System.Text; /// <summary> /// This enum specifies what format should be used when converting string to XsdDateTime /// </summary> [Flags] internal enum XsdDateTimeFlags { DateTime = 0x01, Time = 0x02, Date = 0x04, GYearMonth = 0x08, GYear = 0x10, GMonthDay = 0x20, GDay = 0x40, GMonth = 0x80, XdrDateTimeNoTz = 0x100, XdrDateTime = 0x200, XdrTimeNoTz = 0x400, //XDRTime with tz is the same as xsd:time AllXsd = 0xFF //All still does not include the XDR formats } /// <summary> /// This structure extends System.DateTime to support timeInTicks zone and Gregorian types components of an Xsd Duration. It is used internally to support Xsd durations without loss /// of fidelity. XsdDuration structures are immutable once they've been created. /// </summary> internal struct XsdDateTime { // DateTime is being used as an internal representation only // Casting XsdDateTime to DateTime might return a different value private DateTime _dt; // Additional information that DateTime is not preserving // Information is stored in the following format: // Bits Info // 31-24 DateTimeTypeCode // 23-16 XsdDateTimeKind // 15-8 Zone Hours // 7-0 Zone Minutes private uint _extra; // Subset of XML Schema types XsdDateTime represents private enum DateTimeTypeCode { DateTime, Time, Date, GYearMonth, GYear, GMonthDay, GDay, GMonth, XdrDateTime, } // Internal representation of DateTimeKind private enum XsdDateTimeKind { Unspecified, Zulu, LocalWestOfZulu, // GMT-1..14, N..Y LocalEastOfZulu // GMT+1..14, A..M } // Masks and shifts used for packing and unpacking extra private const uint TypeMask = 0xFF000000; private const uint KindMask = 0x00FF0000; private const uint ZoneHourMask = 0x0000FF00; private const uint ZoneMinuteMask = 0x000000FF; private const int TypeShift = 24; private const int KindShift = 16; private const int ZoneHourShift = 8; // Maximum number of fraction digits; private const short maxFractionDigits = 7; private const int ticksToFractionDivisor = 10000000; private static readonly int s_lzyyyy = "yyyy".Length; private static readonly int s_lzyyyy_ = "yyyy-".Length; private static readonly int s_lzyyyy_MM = "yyyy-MM".Length; private static readonly int s_lzyyyy_MM_ = "yyyy-MM-".Length; private static readonly int s_lzyyyy_MM_dd = "yyyy-MM-dd".Length; private static readonly int s_lzyyyy_MM_ddT = "yyyy-MM-ddT".Length; private static readonly int s_lzHH = "HH".Length; private static readonly int s_lzHH_ = "HH:".Length; private static readonly int s_lzHH_mm = "HH:mm".Length; private static readonly int s_lzHH_mm_ = "HH:mm:".Length; private static readonly int s_lzHH_mm_ss = "HH:mm:ss".Length; private static readonly int s_Lz_ = "-".Length; private static readonly int s_lz_zz = "-zz".Length; private static readonly int s_lz_zz_ = "-zz:".Length; private static readonly int s_lz_zz_zz = "-zz:zz".Length; private static readonly int s_Lz__ = "--".Length; private static readonly int s_lz__mm = "--MM".Length; private static readonly int s_lz__mm_ = "--MM-".Length; private static readonly int s_lz__mm__ = "--MM--".Length; private static readonly int s_lz__mm_dd = "--MM-dd".Length; private static readonly int s_Lz___ = "---".Length; private static readonly int s_lz___dd = "---dd".Length; // These values were copied from the DateTime class and are // needed to convert ticks to year, month and 
day. See comment // for method GetYearMonthDay for rationale. // Number of 100ns ticks per time unit private const long TicksPerMillisecond = 10000; private const long TicksPerSecond = TicksPerMillisecond * 1000; private const long TicksPerMinute = TicksPerSecond * 60; private const long TicksPerHour = TicksPerMinute * 60; private const long TicksPerDay = TicksPerHour * 24; // Number of days in a non-leap year private const int DaysPerYear = 365; // Number of days in 4 years private const int DaysPer4Years = DaysPerYear * 4 + 1; // 1461 // Number of days in 100 years private const int DaysPer100Years = DaysPer4Years * 25 - 1; // 36524 // Number of days in 400 years private const int DaysPer400Years = DaysPer100Years * 4 + 1; // 146097 private static readonly int[] DaysToMonth365 = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365}; private static readonly int[] DaysToMonth366 = { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}; /// <summary> /// Constructs an XsdDateTime from a string using specific format. /// </summary> public XsdDateTime(string text, XsdDateTimeFlags kinds) : this() { Parser parser = default; if (!parser.Parse(text, kinds)) { throw new FormatException(SR.Format(SR.XmlConvert_BadFormat, text, kinds)); } InitiateXsdDateTime(parser); } private XsdDateTime(Parser parser) : this() { InitiateXsdDateTime(parser); } private void InitiateXsdDateTime(Parser parser) { _dt = new DateTime(parser.year, parser.month, parser.day, parser.hour, parser.minute, parser.second); if (parser.fraction != 0) { _dt = _dt.AddTicks(parser.fraction); } _extra = (uint)(((int)parser.typeCode << TypeShift) | ((int)parser.kind << KindShift) | (parser.zoneHour << ZoneHourShift) | parser.zoneMinute); } internal static bool TryParse(string text, XsdDateTimeFlags kinds, out XsdDateTime result) { Parser parser = default; if (!parser.Parse(text, kinds)) { result = default; return false; } result = new XsdDateTime(parser); return true; } /// <summary> /// Constructs an XsdDateTime from a DateTime. 
/// </summary> public XsdDateTime(DateTime dateTime, XsdDateTimeFlags kinds) { Debug.Assert(Bits.ExactlyOne((uint)kinds), "Only one DateTime type code can be set."); _dt = dateTime; DateTimeTypeCode code = (DateTimeTypeCode)(Bits.LeastPosition((uint)kinds) - 1); int zoneHour = 0; int zoneMinute = 0; XsdDateTimeKind kind; switch (dateTime.Kind) { case DateTimeKind.Unspecified: kind = XsdDateTimeKind.Unspecified; break; case DateTimeKind.Utc: kind = XsdDateTimeKind.Zulu; break; default: { Debug.Assert(dateTime.Kind == DateTimeKind.Local, $"Unknown DateTimeKind: {dateTime.Kind}"); TimeSpan utcOffset = TimeZoneInfo.Local.GetUtcOffset(dateTime); if (utcOffset.Ticks < 0) { kind = XsdDateTimeKind.LocalWestOfZulu; zoneHour = -utcOffset.Hours; zoneMinute = -utcOffset.Minutes; } else { kind = XsdDateTimeKind.LocalEastOfZulu; zoneHour = utcOffset.Hours; zoneMinute = utcOffset.Minutes; } break; } } _extra = (uint)(((int)code << TypeShift) | ((int)kind << KindShift) | (zoneHour << ZoneHourShift) | zoneMinute); } // Constructs an XsdDateTime from a DateTimeOffset public XsdDateTime(DateTimeOffset dateTimeOffset) : this(dateTimeOffset, XsdDateTimeFlags.DateTime) { } public XsdDateTime(DateTimeOffset dateTimeOffset, XsdDateTimeFlags kinds) { Debug.Assert(Bits.ExactlyOne((uint)kinds), "Only one DateTime type code can be set."); _dt = dateTimeOffset.DateTime; TimeSpan zoneOffset = dateTimeOffset.Offset; DateTimeTypeCode code = (DateTimeTypeCode)(Bits.LeastPosition((uint)kinds) - 1); XsdDateTimeKind kind; if (zoneOffset.TotalMinutes < 0) { zoneOffset = zoneOffset.Negate(); kind = XsdDateTimeKind.LocalWestOfZulu; } else if (zoneOffset.TotalMinutes > 0) { kind = XsdDateTimeKind.LocalEastOfZulu; } else { kind = XsdDateTimeKind.Zulu; } _extra = (uint)(((int)code << TypeShift) | ((int)kind << KindShift) | (zoneOffset.Hours << ZoneHourShift) | zoneOffset.Minutes); } /// <summary> /// Returns auxiliary enumeration of XSD date type /// </summary> private DateTimeTypeCode InternalTypeCode { get { return (DateTimeTypeCode)((_extra & TypeMask) >> TypeShift); } } /// <summary> /// Returns geographical "position" of the value /// </summary> private XsdDateTimeKind InternalKind { get { return (XsdDateTimeKind)((_extra & KindMask) >> KindShift); } } /// <summary> /// Returns XmlTypeCode of the value being stored /// </summary> public XmlTypeCode TypeCode { get { return s_typeCodes[(int)InternalTypeCode]; } } /// <summary> /// Returns the year part of XsdDateTime /// The returned value is integer between 1 and 9999 /// </summary> public int Year { get { return _dt.Year; } } /// <summary> /// Returns the month part of XsdDateTime /// The returned value is integer between 1 and 12 /// </summary> public int Month { get { return _dt.Month; } } /// <summary> /// Returns the day of the month part of XsdDateTime /// The returned value is integer between 1 and 31 /// </summary> public int Day { get { return _dt.Day; } } /// <summary> /// Returns the hour part of XsdDateTime /// The returned value is integer between 0 and 23 /// </summary> public int Hour { get { return _dt.Hour; } } /// <summary> /// Returns the minute part of XsdDateTime /// The returned value is integer between 0 and 60 /// </summary> public int Minute { get { return _dt.Minute; } } /// <summary> /// Returns the second part of XsdDateTime /// The returned value is integer between 0 and 60 /// </summary> public int Second { get { return _dt.Second; } } /// <summary> /// Returns number of ticks in the fraction of the second /// The returned value is integer between 
0 and 9999999 /// </summary> public int Fraction { get { return (int)(_dt.Ticks % ticksToFractionDivisor); } } /// <summary> /// Returns the hour part of the time zone /// The returned value is integer between -13 and 13 /// </summary> public int ZoneHour { get { uint result = (_extra & ZoneHourMask) >> ZoneHourShift; return (int)result; } } /// <summary> /// Returns the minute part of the time zone /// The returned value is integer between 0 and 60 /// </summary> public int ZoneMinute { get { uint result = (_extra & ZoneMinuteMask); return (int)result; } } public DateTime ToZulu() => InternalKind switch { // set it to UTC XsdDateTimeKind.Zulu => new DateTime(_dt.Ticks, DateTimeKind.Utc), // Adjust to UTC and then convert to local in the current time zone XsdDateTimeKind.LocalEastOfZulu => new DateTime(_dt.Subtract(new TimeSpan(ZoneHour, ZoneMinute, 0)).Ticks, DateTimeKind.Utc), XsdDateTimeKind.LocalWestOfZulu => new DateTime(_dt.Add(new TimeSpan(ZoneHour, ZoneMinute, 0)).Ticks, DateTimeKind.Utc), _ => _dt, }; /// <summary> /// Cast to DateTime /// The following table describes the behaviors of getting the default value /// when a certain year/month/day values are missing. /// /// An "X" means that the value exists. And "--" means that value is missing. /// /// Year Month Day => ResultYear ResultMonth ResultDay Note /// /// X X X Parsed year Parsed month Parsed day /// X X -- Parsed Year Parsed month First day If we have year and month, assume the first day of that month. /// X -- X Parsed year First month Parsed day If the month is missing, assume first month of that year. /// X -- -- Parsed year First month First day If we have only the year, assume the first day of that year. /// /// -- X X CurrentYear Parsed month Parsed day If the year is missing, assume the current year. /// -- X -- CurrentYear Parsed month First day If we have only a month value, assume the current year and current day. /// -- -- X CurrentYear First month Parsed day If we have only a day value, assume current year and first month. /// -- -- -- CurrentYear Current month Current day So this means that if the date string only contains time, you will get current date. /// </summary> public static implicit operator DateTime(XsdDateTime xdt) { DateTime result; switch (xdt.InternalTypeCode) { case DateTimeTypeCode.GMonth: case DateTimeTypeCode.GDay: result = new DateTime(DateTime.Now.Year, xdt.Month, xdt.Day); break; case DateTimeTypeCode.Time: //back to DateTime.Now DateTime currentDateTime = DateTime.Now; TimeSpan addDiff = new DateTime(currentDateTime.Year, currentDateTime.Month, currentDateTime.Day) - new DateTime(xdt.Year, xdt.Month, xdt.Day); result = xdt._dt.Add(addDiff); break; default: result = xdt._dt; break; } long ticks; switch (xdt.InternalKind) { case XsdDateTimeKind.Zulu: // set it to UTC result = new DateTime(result.Ticks, DateTimeKind.Utc); break; case XsdDateTimeKind.LocalEastOfZulu: // Adjust to UTC and then convert to local in the current time zone ticks = result.Ticks - new TimeSpan(xdt.ZoneHour, xdt.ZoneMinute, 0).Ticks; if (ticks < DateTime.MinValue.Ticks) { // Underflow. 
        public static implicit operator DateTimeOffset(XsdDateTime xdt)
        {
            DateTime dt;
            switch (xdt.InternalTypeCode)
            {
                case DateTimeTypeCode.GMonth:
                case DateTimeTypeCode.GDay:
                    dt = new DateTime(DateTime.Now.Year, xdt.Month, xdt.Day);
                    break;
                case DateTimeTypeCode.Time:
                    //back to DateTime.Now
                    DateTime currentDateTime = DateTime.Now;
                    TimeSpan addDiff = new DateTime(currentDateTime.Year, currentDateTime.Month, currentDateTime.Day) - new DateTime(xdt.Year, xdt.Month, xdt.Day);
                    dt = xdt._dt.Add(addDiff);
                    break;
                default:
                    dt = xdt._dt;
                    break;
            }

            DateTimeOffset result;
            switch (xdt.InternalKind)
            {
                case XsdDateTimeKind.LocalEastOfZulu:
                    result = new DateTimeOffset(dt, new TimeSpan(xdt.ZoneHour, xdt.ZoneMinute, 0));
                    break;
                case XsdDateTimeKind.LocalWestOfZulu:
                    result = new DateTimeOffset(dt, new TimeSpan(-xdt.ZoneHour, -xdt.ZoneMinute, 0));
                    break;
                case XsdDateTimeKind.Zulu:
                    result = new DateTimeOffset(dt, new TimeSpan(0));
                    break;
                case XsdDateTimeKind.Unspecified:
                default:
                    result = new DateTimeOffset(dt, TimeZoneInfo.Local.GetUtcOffset(dt));
                    break;
            }
            return result;
        }

        /// <summary>
        /// Serialization to a string
        /// </summary>
        public override string ToString()
        {
            var vsb = new ValueStringBuilder(stackalloc char[64]);
            switch (InternalTypeCode)
            {
                case DateTimeTypeCode.DateTime:
                    PrintDate(ref vsb);
                    vsb.Append('T');
                    PrintTime(ref vsb);
                    break;
                case DateTimeTypeCode.Time:
                    PrintTime(ref vsb);
                    break;
                case DateTimeTypeCode.Date:
                    PrintDate(ref vsb);
                    break;
                case DateTimeTypeCode.GYearMonth:
                    vsb.AppendSpanFormattable(Year, format: "D4", provider: null);
                    vsb.Append('-');
                    vsb.AppendSpanFormattable(Month, format: "D2", provider: null);
                    break;
                case DateTimeTypeCode.GYear:
                    vsb.AppendSpanFormattable(Year, format: "D4", provider: null);
                    break;
                case DateTimeTypeCode.GMonthDay:
                    vsb.Append("--");
                    vsb.AppendSpanFormattable(Month, format: "D2", provider: null);
                    vsb.Append('-');
                    vsb.AppendSpanFormattable(Day, format: "D2", provider: null);
                    break;
                case DateTimeTypeCode.GDay:
                    vsb.Append("---");
                    vsb.AppendSpanFormattable(Day, format: "D2", provider: null);
                    break;
                case DateTimeTypeCode.GMonth:
                    vsb.Append("--");
                    vsb.AppendSpanFormattable(Month, format: "D2", provider: null);
                    vsb.Append("--");
                    break;
            }
            PrintZone(ref vsb);
            return vsb.ToString();
        }

        // Serialize year, month and day
        private void PrintDate(ref ValueStringBuilder vsb)
        {
            Span<char> text = vsb.AppendSpan(s_lzyyyy_MM_dd);
            int year, month, day;
            GetYearMonthDay(out year, out month, out day);
            WriteXDigits(text, 0, year, 4);
            text[s_lzyyyy] = '-';
            Write2Digits(text, s_lzyyyy_, month);
            text[s_lzyyyy_MM] = '-';
            Write2Digits(text, s_lzyyyy_MM_, day);
        }

        // When printing the date, we need the year, month and the day. When
        // requesting these values from DateTime, it needs to redo the year
        // calculation before it can calculate the month, and it needs to redo
        // the year and month calculation before it can calculate the day. This
        // results in the year being calculated 3 times, the month twice and the
        // day once. As we know that we need all 3 values, by duplicating the
        // logic here we can calculate the number of days and return the intermediate
        // calculations for month and year without the added cost.
        private void GetYearMonthDay(out int year, out int month, out int day)
        {
            long ticks = _dt.Ticks;
            // n = number of days since 1/1/0001
            int n = (int)(ticks / TicksPerDay);
            // y400 = number of whole 400-year periods since 1/1/0001
            int y400 = n / DaysPer400Years;
            // n = day number within 400-year period
            n -= y400 * DaysPer400Years;
            // y100 = number of whole 100-year periods within 400-year period
            int y100 = n / DaysPer100Years;
            // Last 100-year period has an extra day, so decrement result if 4
            if (y100 == 4)
                y100 = 3;
            // n = day number within 100-year period
            n -= y100 * DaysPer100Years;
            // y4 = number of whole 4-year periods within 100-year period
            int y4 = n / DaysPer4Years;
            // n = day number within 4-year period
            n -= y4 * DaysPer4Years;
            // y1 = number of whole years within 4-year period
            int y1 = n / DaysPerYear;
            // Last year has an extra day, so decrement result if 4
            if (y1 == 4)
                y1 = 3;

            year = y400 * 400 + y100 * 100 + y4 * 4 + y1 + 1;

            // n = day number within year
            n -= y1 * DaysPerYear;

            // Leap year calculation looks different from IsLeapYear since y1, y4,
            // and y100 are relative to year 1, not year 0
            bool leapYear = y1 == 3 && (y4 != 24 || y100 == 3);
            int[] days = leapYear ? DaysToMonth366 : DaysToMonth365;
            // All months have less than 32 days, so n >> 5 is a good conservative
            // estimate for the month
            month = (n >> 5) + 1;
            // m = 1-based month number
            while (n >= days[month])
                month++;
            day = n - days[month - 1] + 1;
        }
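        // [Editorial illustration, not part of the original file] A worked example of
        // the decomposition above: for 0004-01-01, n = 3 * 365 = 1095 days since
        // 1/1/0001, so y400 = y100 = y4 = 0 and y1 = 1095 / 365 = 3, giving
        // year = 3 + 1 = 4; n becomes 0, and leapYear is true because y1 == 3 and
        // y4 != 24, matching the proleptic Gregorian rule that year 4 is a leap
        // year. Then n >> 5 == 0 guesses month 1, days[1] = 31 > 0 confirms it,
        // and day = 0 - days[0] + 1 = 1.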
        // Serialize hour, minute, second and fraction
        private void PrintTime(ref ValueStringBuilder vsb)
        {
            Span<char> text = vsb.AppendSpan(s_lzHH_mm_ss);
            Write2Digits(text, 0, Hour);
            text[s_lzHH] = ':';
            Write2Digits(text, s_lzHH_, Minute);
            text[s_lzHH_mm] = ':';
            Write2Digits(text, s_lzHH_mm_, Second);

            int fraction = Fraction;
            if (fraction != 0)
            {
                int fractionDigits = maxFractionDigits;
                while (fraction % 10 == 0)
                {
                    fractionDigits--;
                    fraction /= 10;
                }

                text = vsb.AppendSpan(fractionDigits + 1);
                text[0] = '.';
                WriteXDigits(text, 1, fraction, fractionDigits);
            }
        }

        // Serialize time zone
        private void PrintZone(ref ValueStringBuilder vsb)
        {
            Span<char> text;
            switch (InternalKind)
            {
                case XsdDateTimeKind.Zulu:
                    vsb.Append('Z');
                    break;
                case XsdDateTimeKind.LocalWestOfZulu:
                    text = vsb.AppendSpan(s_lz_zz_zz);
                    text[0] = '-';
                    Write2Digits(text, s_Lz_, ZoneHour);
                    text[s_lz_zz] = ':';
                    Write2Digits(text, s_lz_zz_, ZoneMinute);
                    break;
                case XsdDateTimeKind.LocalEastOfZulu:
                    text = vsb.AppendSpan(s_lz_zz_zz);
                    text[0] = '+';
                    Write2Digits(text, s_Lz_, ZoneHour);
                    text[s_lz_zz] = ':';
                    Write2Digits(text, s_lz_zz_, ZoneMinute);
                    break;
                default:
                    // do nothing
                    break;
            }
        }

        // Serialize integer into character Span starting with index [start].
        // Number of digits is set by [digits]
        private static void WriteXDigits(Span<char> text, int start, int value, int digits)
        {
            while (digits-- != 0)
            {
                text[start + digits] = (char)(value % 10 + '0');
                value /= 10;
            }
        }
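        // [Editorial illustration, not part of the original file] WriteXDigits fills
        // the span right-to-left, so for example WriteXDigits(text, 0, 42, 4) writes
        // the characters "0042" into text[0..3].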
        // Serialize two digit integer into character Span starting with index [start].
        private static void Write2Digits(Span<char> text, int start, int value)
        {
            text[start] = (char)(value / 10 + '0');
            text[start + 1] = (char)(value % 10 + '0');
        }

        private static readonly XmlTypeCode[] s_typeCodes =
        {
            XmlTypeCode.DateTime,
            XmlTypeCode.Time,
            XmlTypeCode.Date,
            XmlTypeCode.GYearMonth,
            XmlTypeCode.GYear,
            XmlTypeCode.GMonthDay,
            XmlTypeCode.GDay,
            XmlTypeCode.GMonth
        };

        // Parsing string according to XML schema spec
        private struct Parser
        {
            private const int leapYear = 1904;
            private const int firstMonth = 1;
            private const int firstDay = 1;

            public DateTimeTypeCode typeCode;
            public int year;
            public int month;
            public int day;
            public int hour;
            public int minute;
            public int second;
            public int fraction;
            public XsdDateTimeKind kind;
            public int zoneHour;
            public int zoneMinute;

            private string _text;
            private int _length;

            public bool Parse(string text, XsdDateTimeFlags kinds)
            {
                _text = text;
                _length = text.Length;

                // Skip leading whitespace
                int start = 0;
                while (start < _length && char.IsWhiteSpace(text[start]))
                {
                    start++;
                }

                // Choose format starting from the most common and trying not to reparse the same thing too many times
                if (Test(kinds, XsdDateTimeFlags.DateTime | XsdDateTimeFlags.Date | XsdDateTimeFlags.XdrDateTime | XsdDateTimeFlags.XdrDateTimeNoTz))
                {
                    if (ParseDate(start))
                    {
                        if (Test(kinds, XsdDateTimeFlags.DateTime))
                        {
                            if (ParseChar(start + s_lzyyyy_MM_dd, 'T') && ParseTimeAndZoneAndWhitespace(start + s_lzyyyy_MM_ddT))
                            {
                                typeCode = DateTimeTypeCode.DateTime;
                                return true;
                            }
                        }
                        if (Test(kinds, XsdDateTimeFlags.Date))
                        {
                            if (ParseZoneAndWhitespace(start + s_lzyyyy_MM_dd))
                            {
                                typeCode = DateTimeTypeCode.Date;
                                return true;
                            }
                        }
                        if (Test(kinds, XsdDateTimeFlags.XdrDateTime))
                        {
                            if (ParseZoneAndWhitespace(start + s_lzyyyy_MM_dd) || (ParseChar(start + s_lzyyyy_MM_dd, 'T') && ParseTimeAndZoneAndWhitespace(start + s_lzyyyy_MM_ddT)))
                            {
                                typeCode = DateTimeTypeCode.XdrDateTime;
                                return true;
                            }
                        }
                        if (Test(kinds, XsdDateTimeFlags.XdrDateTimeNoTz))
                        {
                            if (ParseChar(start + s_lzyyyy_MM_dd, 'T'))
                            {
                                if (ParseTimeAndWhitespace(start + s_lzyyyy_MM_ddT))
                                {
                                    typeCode = DateTimeTypeCode.XdrDateTime;
                                    return true;
                                }
                            }
                            else
                            {
                                typeCode = DateTimeTypeCode.XdrDateTime;
                                return true;
                            }
                        }
                    }
                }

                if (Test(kinds, XsdDateTimeFlags.Time))
                {
                    if (ParseTimeAndZoneAndWhitespace(start))
                    {
                        //Equivalent to NoCurrentDateDefault on DateTimeStyles while parsing xs:time
                        year = leapYear;
                        month = firstMonth;
                        day = firstDay;
                        typeCode = DateTimeTypeCode.Time;
                        return true;
                    }
                }

                if (Test(kinds, XsdDateTimeFlags.XdrTimeNoTz))
                {
                    if (ParseTimeAndWhitespace(start))
                    {
                        //Equivalent to NoCurrentDateDefault on DateTimeStyles while parsing xs:time
                        year = leapYear;
                        month = firstMonth;
                        day = firstDay;
                        typeCode = DateTimeTypeCode.Time;
                        return true;
                    }
                }

                if (Test(kinds, XsdDateTimeFlags.GYearMonth | XsdDateTimeFlags.GYear))
                {
                    if (Parse4Dig(start, ref year) && 1 <= year)
                    {
                        if (Test(kinds, XsdDateTimeFlags.GYearMonth))
                        {
                            if (
                                ParseChar(start + s_lzyyyy, '-') &&
                                Parse2Dig(start + s_lzyyyy_, ref month) && 1 <= month && month <= 12 &&
                                ParseZoneAndWhitespace(start + s_lzyyyy_MM)
                            )
                            {
                                day = firstDay;
                                typeCode = DateTimeTypeCode.GYearMonth;
                                return true;
                            }
                        }
                        if (Test(kinds, XsdDateTimeFlags.GYear))
                        {
                            if (ParseZoneAndWhitespace(start + s_lzyyyy))
                            {
                                month = firstMonth;
                                day = firstDay;
                                typeCode = DateTimeTypeCode.GYear;
                                return true;
                            }
                        }
                    }
                }

                if (Test(kinds, XsdDateTimeFlags.GMonthDay | XsdDateTimeFlags.GMonth))
                {
                    if (
                        ParseChar(start, '-') &&
                        ParseChar(start + s_Lz_, '-') &&
                        Parse2Dig(start + s_Lz__, ref month) && 1 <= month && month <= 12
                    )
                    {
                        if (Test(kinds, XsdDateTimeFlags.GMonthDay) && ParseChar(start + s_lz__mm, '-'))
                        {
                            if (
                                Parse2Dig(start + s_lz__mm_, ref day) && 1 <= day && day <= DateTime.DaysInMonth(leapYear, month) &&
                                ParseZoneAndWhitespace(start + s_lz__mm_dd)
                            )
                            {
                                year = leapYear;
                                typeCode = DateTimeTypeCode.GMonthDay;
                                return true;
                            }
                        }
                        if (Test(kinds, XsdDateTimeFlags.GMonth))
                        {
                            if (ParseZoneAndWhitespace(start + s_lz__mm) || (ParseChar(start + s_lz__mm, '-') && ParseChar(start + s_lz__mm_, '-') && ParseZoneAndWhitespace(start + s_lz__mm__)))
                            {
                                year = leapYear;
                                day = firstDay;
                                typeCode = DateTimeTypeCode.GMonth;
                                return true;
                            }
                        }
                    }
                }

                if (Test(kinds, XsdDateTimeFlags.GDay))
                {
                    if (
                        ParseChar(start, '-') &&
                        ParseChar(start + s_Lz_, '-') &&
                        ParseChar(start + s_Lz__, '-') &&
                        Parse2Dig(start + s_Lz___, ref day) && 1 <= day && day <= DateTime.DaysInMonth(leapYear, firstMonth) &&
                        ParseZoneAndWhitespace(start + s_lz___dd)
                    )
                    {
                        year = leapYear;
                        month = firstMonth;
                        typeCode = DateTimeTypeCode.GDay;
                        return true;
                    }
                }
                return false;
            }

            private bool ParseDate(int start)
            {
                return
                    Parse4Dig(start, ref year) && 1 <= year &&
                    ParseChar(start + s_lzyyyy, '-') &&
                    Parse2Dig(start + s_lzyyyy_, ref month) && 1 <= month && month <= 12 &&
                    ParseChar(start + s_lzyyyy_MM, '-') &&
                    Parse2Dig(start + s_lzyyyy_MM_, ref day) && 1 <= day && day <= DateTime.DaysInMonth(year, month);
            }

            private bool ParseTimeAndZoneAndWhitespace(int start)
            {
                if (ParseTime(ref start))
                {
                    if (ParseZoneAndWhitespace(start))
                    {
                        return true;
                    }
                }
                return false;
            }

            private bool ParseTimeAndWhitespace(int start)
            {
                if (ParseTime(ref start))
                {
                    while (start < _length)
                    {//&& char.IsWhiteSpace(text[start])) {
                        start++;
                    }
                    return start == _length;
                }
                return false;
            }

            private static readonly int[] s_power10 = new int[maxFractionDigits] { -1, 10, 100, 1000, 10000, 100000, 1000000 };

            private bool ParseTime(ref int start)
            {
                if (
                    Parse2Dig(start, ref hour) && hour < 24 &&
                    ParseChar(start + s_lzHH, ':') &&
                    Parse2Dig(start + s_lzHH_, ref minute) && minute < 60 &&
                    ParseChar(start + s_lzHH_mm, ':') &&
                    Parse2Dig(start + s_lzHH_mm_, ref second) && second < 60
                )
                {
                    start += s_lzHH_mm_ss;
                    if (ParseChar(start, '.'))
                    {
                        // Parse fractional part of seconds
                        // We allow any number of digits, but keep only first 7
                        this.fraction = 0;
                        int fractionDigits = 0;
                        int round = 0;
                        while (++start < _length)
                        {
                            int d = _text[start] - '0';
                            if (9u < unchecked((uint)d))
                            {
                                // d < 0 || 9 < d
                                break;
                            }
                            if (fractionDigits < maxFractionDigits)
                            {
                                this.fraction = (this.fraction * 10) + d;
                            }
                            else if (fractionDigits == maxFractionDigits)
                            {
                                if (5 < d)
                                {
                                    round = 1;
                                }
                                else if (d == 5)
                                {
                                    round = -1;
                                }
                            }
                            else if (round < 0 && d != 0)
                            {
                                round = 1;
                            }
                            fractionDigits++;
                        }
                        if (fractionDigits < maxFractionDigits)
                        {
                            if (fractionDigits == 0)
                            {
                                return false; // cannot end with .
                            }
                            fraction *= s_power10[maxFractionDigits - fractionDigits];
                        }
                        else
                        {
                            if (round < 0)
                            {
                                round = fraction & 1;
                            }
                            fraction += round;
                        }
                    }
                    return true;
                }
                // cleanup - conflict with gYear
                hour = 0;
                return false;
            }
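            // [Editorial illustration, not part of the original file] Worked examples
            // of the fraction handling above, which keeps the first 7 digits and
            // rounds half-to-even on the 8th:
            //
            //   ".1234567"   -> fraction == 1234567
            //   ".12345674"  -> fraction == 1234567 (8th digit < 5, round stays 0)
            //   ".12345675"  -> fraction == 1234568 (8th digit == 5, round = fraction & 1 = 1)
            //   ".123456751" -> fraction == 1234568 (nonzero digit after the 5 forces rounding up)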
            private bool ParseZoneAndWhitespace(int start)
            {
                if (start < _length)
                {
                    char ch = _text[start];
                    if (ch == 'Z' || ch == 'z')
                    {
                        kind = XsdDateTimeKind.Zulu;
                        start++;
                    }
                    else if (start + 5 < _length)
                    {
                        if (
                            Parse2Dig(start + s_Lz_, ref zoneHour) && zoneHour <= 99 &&
                            ParseChar(start + s_lz_zz, ':') &&
                            Parse2Dig(start + s_lz_zz_, ref zoneMinute) && zoneMinute <= 99
                        )
                        {
                            if (ch == '-')
                            {
                                kind = XsdDateTimeKind.LocalWestOfZulu;
                                start += s_lz_zz_zz;
                            }
                            else if (ch == '+')
                            {
                                kind = XsdDateTimeKind.LocalEastOfZulu;
                                start += s_lz_zz_zz;
                            }
                        }
                    }
                }
                while (start < _length && char.IsWhiteSpace(_text[start]))
                {
                    start++;
                }
                return start == _length;
            }

            private bool Parse4Dig(int start, ref int num)
            {
                if (start + 3 < _length)
                {
                    int d4 = _text[start] - '0';
                    int d3 = _text[start + 1] - '0';
                    int d2 = _text[start + 2] - '0';
                    int d1 = _text[start + 3] - '0';
                    if (0 <= d4 && d4 < 10 &&
                        0 <= d3 && d3 < 10 &&
                        0 <= d2 && d2 < 10 &&
                        0 <= d1 && d1 < 10
                    )
                    {
                        num = ((d4 * 10 + d3) * 10 + d2) * 10 + d1;
                        return true;
                    }
                }
                return false;
            }

            private bool Parse2Dig(int start, ref int num)
            {
                if (start + 1 < _length)
                {
                    int d2 = _text[start] - '0';
                    int d1 = _text[start + 1] - '0';
                    if (0 <= d2 && d2 < 10 &&
                        0 <= d1 && d1 < 10
                    )
                    {
                        num = d2 * 10 + d1;
                        return true;
                    }
                }
                return false;
            }

            private bool ParseChar(int start, char ch)
            {
                return start < _length && _text[start] == ch;
            }

            private static bool Test(XsdDateTimeFlags left, XsdDateTimeFlags right)
            {
                return (left & right) != 0;
            }
        }
    }
}
-1
dotnet/runtime
66,282
Enable Fast Tail Call Optimization for ARM32
- Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
clamp03
2022-03-07T05:47:20Z
2022-03-13T11:50:20Z
227ff3d53784f655fa04dad059a98b3e8d291d61
51b90cc60b8528c77829ef18481b0f58db812776
Enable Fast Tail Call Optimization for ARM32. - Do not use a fast tail call when the callee uses a split struct argument - Do not use a fast tail call when the callee uses a non-standard calling convention - Do not use a fast tail call when it would overwrite stack space that will be passed to the callee.
./src/tests/Loader/classloader/generics/Variance/Delegates/Delegates001.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Delegates001.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="Lib.ilproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Delegates001.cs" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="Lib.ilproj" />
  </ItemGroup>
</Project>
-1